author | Vincent Donnefort <vdonnefort@google.com> | 2024-06-28 11:46:11 +0100
committer | Steven Rostedt (Google) <rostedt@goodmis.org> | 2024-08-26 13:42:23 -0400
commit | eb2dcde9f970ed8d3669444d47c8524b4bdf7d32 (patch)
tree | fce2721f068acd2461f8d315c4c2406383348a56 /kernel/trace/ring_buffer.c
parent | d0f2d6e9512ecf4306c4432761f04bd35cf9e3a6 (diff)
ring-buffer: Align meta-page to sub-buffers for improved TLB usage
Previously, the mapped ring-buffer layout caused misalignment between
the meta-page and sub-buffers when the sub-buffer size was not a
multiple of PAGE_SIZE. This prevented hardware with larger TLB entries
from utilizing them effectively.
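For illustration only (not part of the patch), assume a 4 KiB PAGE_SIZE and subbuf_order = 2, i.e. 16 KiB sub-buffers. With the old layout the first sub-buffer started one page after the meta-page, so it could never sit on a sub-buffer-size boundary within the mapping; with the padding it starts at a boundary aligned to its own size:

/*
 * Illustrative layout computation only; values assume a 4 KiB PAGE_SIZE
 * and subbuf_order = 2 (16 KiB sub-buffers). Not part of the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long subbuf_pages = 1UL << 2;		/* subbuf_order = 2 */
	unsigned long subbuf_size = subbuf_pages * page_size;

	/* Old layout: meta-page is a single page, sub-buffers follow it. */
	unsigned long old_first = page_size;
	/* New layout: meta-page padded with zero-pages up to subbuf_size. */
	unsigned long new_first = subbuf_size;

	printf("old first sub-buffer offset: %#lx (aligned: %s)\n",
	       old_first, (old_first % subbuf_size) ? "no" : "yes");
	printf("new first sub-buffer offset: %#lx (aligned: %s)\n",
	       new_first, (new_first % subbuf_size) ? "no" : "yes");
	return 0;
}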
Pad the region between the meta-page and the sub-buffers with the zero-page.
Also update the ring-buffer map_test to verify that padding.
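A rough user-space sketch of how the new layout is consumed. It assumes the trace_pipe_raw mmap interface and struct trace_buffer_meta from <linux/trace_mmap.h>; the file path and field names should be checked against the installed UAPI headers:

/*
 * Sketch: map a per-CPU ring buffer and locate the first sub-buffer.
 * Assumes the trace_pipe_raw mmap interface and struct trace_buffer_meta
 * from <linux/trace_mmap.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/trace_mmap.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	/* Map the meta-page first to learn the layout. */
	long page_size = sysconf(_SC_PAGESIZE);
	struct trace_buffer_meta *meta =
		mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (meta == MAP_FAILED)
		return 1;

	size_t data_len = (size_t)meta->subbuf_size * meta->nr_subbufs;
	/*
	 * With this patch, the sub-buffers start right after the (padded)
	 * meta-page, i.e. at offset meta->meta_page_size, so they stay
	 * aligned to their own size.
	 */
	void *base = mmap(NULL, meta->meta_page_size + data_len,
			  PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return 1;

	void *first_subbuf = (char *)base + meta->meta_page_size;
	printf("first sub-buffer at %p (meta_page_size=%u)\n",
	       first_subbuf, meta->meta_page_size);

	munmap(base, meta->meta_page_size + data_len);
	munmap(meta, page_size);
	close(fd);
	return 0;
}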
Link: https://lore.kernel.org/20240628104611.1443542-1-vdonnefort@google.com
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r-- | kernel/trace/ring_buffer.c | 33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c3a5e6cbb940..77dc0b25140e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6852,10 +6852,10 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
 	/* install subbuf ID to kern VA translation */
 	cpu_buffer->subbuf_ids = subbuf_ids;
 
-	meta->meta_page_size = PAGE_SIZE;
 	meta->meta_struct_len = sizeof(*meta);
 	meta->nr_subbufs = nr_subbufs;
 	meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+	meta->meta_page_size = meta->subbuf_size;
 
 	rb_update_meta_page(cpu_buffer);
 }
@@ -6949,6 +6949,12 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 	    !(vma->vm_flags & VM_MAYSHARE))
 		return -EPERM;
 
+	subbuf_order = cpu_buffer->buffer->subbuf_order;
+	subbuf_pages = 1 << subbuf_order;
+
+	if (subbuf_order && pgoff % subbuf_pages)
+		return -EINVAL;
+
 	/*
 	 * Make sure the mapping cannot become writable later. Also tell the VM
 	 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
@@ -6958,11 +6964,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 
 	lockdep_assert_held(&cpu_buffer->mapping_lock);
 
-	subbuf_order = cpu_buffer->buffer->subbuf_order;
-	subbuf_pages = 1 << subbuf_order;
-
 	nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
-	nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
+	nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
 
 	nr_vma_pages = vma_pages(vma);
 	if (!nr_vma_pages || nr_vma_pages > nr_pages)
@@ -6975,20 +6978,24 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 		return -ENOMEM;
 
 	if (!pgoff) {
+		unsigned long meta_page_padding;
+
 		pages[p++] = virt_to_page(cpu_buffer->meta_page);
 
 		/*
-		 * TODO: Align sub-buffers on their size, once
-		 * vm_insert_pages() supports the zero-page.
+		 * Pad with the zero-page to align the meta-page with the
+		 * sub-buffers.
 		 */
-	} else {
-		/* Skip the meta-page */
-		pgoff--;
+		meta_page_padding = subbuf_pages - 1;
+		while (meta_page_padding-- && p < nr_pages) {
+			unsigned long __maybe_unused zero_addr =
+				vma->vm_start + (PAGE_SIZE * p);
 
-		if (pgoff % subbuf_pages) {
-			err = -EINVAL;
-			goto out;
+			pages[p++] = ZERO_PAGE(zero_addr);
 		}
+	} else {
+		/* Skip the meta-page */
+		pgoff -= subbuf_pages;
 
 		s += pgoff / subbuf_pages;
 	}
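A quick, illustrative check of the new nr_pages arithmetic (made-up values, not from the patch): the meta-page is now accounted as a full sub-buffer worth of pages rather than a single page:

/*
 * Illustrative check of the nr_pages change. Values are made up:
 * subbuf_order = 2 (subbuf_pages = 4), 8 sub-buffers including the
 * reader sub-buffer, pgoff = 0.
 */
#include <stdio.h>

int main(void)
{
	unsigned int subbuf_order = 2, nr_subbufs = 8, pgoff = 0;

	/* Old: meta-page counted as a single extra page. */
	unsigned int old_nr = (nr_subbufs << subbuf_order) - pgoff + 1;
	/* New: meta-page plus zero-page padding counted as one more
	 * sub-buffer worth of pages, keeping the layout aligned. */
	unsigned int new_nr = ((nr_subbufs + 1) << subbuf_order) - pgoff;

	printf("old nr_pages = %u, new nr_pages = %u\n", old_nr, new_nr);
	return 0;
}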