In preparation for the ring-buffer memory mapping, where each subbuf will
be accessible to user space, zero all the page allocations. This ensures
the mapped pages can never expose stale kernel data to user space.
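
As an illustrative aside (not part of the change itself): __GFP_ZERO makes
the page allocator return pages whose contents are already cleared. A
minimal sketch of the pattern this patch applies at each allocation site,
assuming the caller provides node, order and the fail label:

	struct page *page;

	/* Allocate 2^order contiguous zeroed pages on the given NUMA node. */
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		goto fail;

	/*
	 * page_address() returns the kernel virtual address of the first
	 * page; with __GFP_ZERO every byte reads back as zero, so nothing
	 * stale is visible once the subbuf is mapped to user space.
	 */
	bpage->page = page_address(page);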

Signed-off-by: Vincent Donnefort <vdonnef...@google.com>
Reviewed-by: Masami Hiramatsu (Google) <mhira...@kernel.org>

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fd4bfe3ecf01..ca796675c0a1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1472,7 +1472,8 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
                list_add(&bpage->list, pages);
 
-               page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
+               page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+                                       mflags | __GFP_ZERO,
                                        cpu_buffer->buffer->subbuf_order);
                if (!page)
                        goto free_pages;
@@ -1557,7 +1558,8 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 
        cpu_buffer->reader_page = bpage;
 
-       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
+       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
+                               cpu_buffer->buffer->subbuf_order);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
@@ -5525,7 +5527,8 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
        if (bpage->data)
                goto out;
 
-       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
+       page = alloc_pages_node(cpu_to_node(cpu),
+                               GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
                                cpu_buffer->buffer->subbuf_order);
        if (!page) {
                kfree(bpage);
-- 
2.43.0.594.gd9cf4e227d-goog

