The subbuf_ids field allows looking up a specific page of the ring-buffer
based on its ID. In preparation for the upcoming ring-buffer remote
support, make this array point to the buffer_page instead of the
buffer_data_page.

Signed-off-by: Vincent Donnefort <[email protected]>
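
For context, here is a minimal sketch (not part of the patch) of how a
sub-buffer ID now resolves to a struct page through the buffer_page
indirection. The helper name subbuf_id_to_page() is hypothetical and
simply mirrors the updated lookup in __rb_map_vma():

static struct page *subbuf_id_to_page(struct ring_buffer_per_cpu *cpu_buffer,
				       unsigned int id)
{
	/* subbuf_ids[] now stores buffer_page pointers, not kernel VAs. */
	struct buffer_page *subbuf = cpu_buffer->subbuf_ids[id];

	/* The data page itself is still reached via subbuf->page. */
	return virt_to_page((void *)subbuf->page);
}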

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 048dde717b94..aeff2ce6231a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -519,7 +519,7 @@ struct ring_buffer_per_cpu {
        unsigned int                    mapped;
        unsigned int                    user_mapped;    /* user space mapping */
        struct mutex                    mapping_lock;
-       unsigned long                   *subbuf_ids;    /* ID to subbuf VA */
+       struct buffer_page              **subbuf_ids;   /* ID to subbuf VA */
        struct trace_buffer_meta        *meta_page;
        struct ring_buffer_cpu_meta     *ring_meta;
 
@@ -7002,7 +7002,7 @@ static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
-                                  unsigned long *subbuf_ids)
+                                  struct buffer_page **subbuf_ids)
 {
        struct trace_buffer_meta *meta = cpu_buffer->meta_page;
        unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
@@ -7011,7 +7011,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
        int id = 0;
 
        id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, id);
-       subbuf_ids[id++] = (unsigned long)cpu_buffer->reader_page->page;
+       subbuf_ids[id++] = cpu_buffer->reader_page;
        cnt++;
 
        first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
@@ -7021,7 +7021,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
                if (WARN_ON(id >= nr_subbufs))
                        break;
 
-               subbuf_ids[id] = (unsigned long)subbuf->page;
+               subbuf_ids[id] = subbuf;
 
                rb_inc_page(&subbuf);
                id++;
@@ -7030,7 +7030,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
 
        WARN_ON(cnt != nr_subbufs);
 
-       /* install subbuf ID to kern VA translation */
+       /* install subbuf ID to bpage translation */
        cpu_buffer->subbuf_ids = subbuf_ids;
 
        meta->meta_struct_len = sizeof(*meta);
@@ -7186,13 +7186,15 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
        }
 
        while (p < nr_pages) {
+               struct buffer_page *subbuf;
                struct page *page;
                int off = 0;
 
                if (WARN_ON_ONCE(s >= nr_subbufs))
                        return -EINVAL;
 
-               page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
+               subbuf = cpu_buffer->subbuf_ids[s];
+               page = virt_to_page((void *)subbuf->page);
 
                for (; off < (1 << (subbuf_order)); off++, page++) {
                        if (p >= nr_pages)
@@ -7219,7 +7221,8 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
                    struct vm_area_struct *vma)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned long flags, *subbuf_ids;
+       struct buffer_page **subbuf_ids;
+       unsigned long flags;
        int err;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -7243,7 +7246,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
        if (err)
                return err;
 
-       /* subbuf_ids include the reader while nr_pages does not */
+       /* subbuf_ids includes the reader while nr_pages does not */
        subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
        if (!subbuf_ids) {
                rb_free_meta_page(cpu_buffer);
-- 
2.52.0.107.ga0afd4fd5b-goog

