This memory pool is created when the lttng-tracer module is loaded. It
allocates four 4 KB buffers on each CPU. These buffers let tracepoint
probes temporarily store data that does not fit on the stack (during
the code_pre and code_post phases). The memory is freed when the
lttng-tracer module is unloaded.
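For illustration, a minimal sketch (not part of this patch; the
example_record_* names are hypothetical) of the intended call pattern:
a probe reserves a buffer during code_pre and hands it back during
code_post. tp_mempool_alloc() selects the pool of the current CPU via
smp_processor_id(), so it assumes the caller cannot migrate, which
holds in tracepoint probe context; tp_mempool_free() is NULL-safe and
returns the buffer to the CPU pool it was taken from.

#include <linux/string.h>

#include <tp-mempool.h>

/* code_pre: stage a payload that is too large for the stack. */
static void example_record_pre(void **scratch, const void *payload,
		size_t len)
{
	*scratch = tp_mempool_alloc(len); /* NULL if len > 4 KB or pool empty. */
	if (!*scratch)
		return;
	memcpy(*scratch, payload, len);
}

/* code_post: return the buffer (zeroed on free) to the per-cpu pool. */
static void example_record_post(void *scratch)
{
	tp_mempool_free(scratch); /* Safe to call with scratch == NULL. */
}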
This pool removes the need for dynamic allocation during the execution
of tracepoint probes; dynamic allocation does not behave well on a
PREEMPT_RT kernel, even when the GFP_ATOMIC and GFP_NOWAIT flags are
set.

Signed-off-by: Julien Desfossez <[email protected]>
---
 Makefile                             |   3 +-
 lttng-abi.c                          |   9 ++
 probes/lttng-tracepoint-event-impl.h |   1 +
 tp-mempool.c                         | 175 +++++++++++++++++++++++++++++++++++
 tp-mempool.h                         |  48 ++++++++++
 5 files changed, 235 insertions(+), 1 deletion(-)
 create mode 100644 tp-mempool.c
 create mode 100644 tp-mempool.h

diff --git a/Makefile b/Makefile
index 2cd2df0..78f6661 100644
--- a/Makefile
+++ b/Makefile
@@ -59,7 +59,8 @@ ifneq ($(KERNELRELEASE),)
 			lttng-filter.o lttng-filter-interpreter.o \
 			lttng-filter-specialize.o \
 			lttng-filter-validator.o \
-			probes/lttng-probe-user.o
+			probes/lttng-probe-user.o \
+			tp-mempool.o
 
 ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
   lttng-tracer-objs += lttng-syscalls.o

diff --git a/lttng-abi.c b/lttng-abi.c
index d202b72..9c29612 100644
--- a/lttng-abi.c
+++ b/lttng-abi.c
@@ -56,6 +56,7 @@
 #include <lttng-abi-old.h>
 #include <lttng-events.h>
 #include <lttng-tracer.h>
+#include <tp-mempool.h>
 #include <lib/ringbuffer/frontend_types.h>
 
 /*
@@ -1771,6 +1772,12 @@ int __init lttng_abi_init(void)
 
 	wrapper_vmalloc_sync_all();
 	lttng_clock_ref();
+
+	ret = tp_mempool_init();
+	if (ret) {
+		goto error;
+	}
+
 	lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR,
 					NULL, &lttng_fops, NULL);
 
@@ -1784,6 +1791,7 @@ int __init lttng_abi_init(void)
 
 error:
 	lttng_clock_unref();
+	tp_mempool_destroy();
 	return ret;
 }
 
@@ -1793,4 +1801,5 @@ void lttng_abi_exit(void)
 	lttng_clock_unref();
 	if (lttng_proc_dentry)
 		remove_proc_entry("lttng", NULL);
+	tp_mempool_destroy();
 }

diff --git a/probes/lttng-tracepoint-event-impl.h b/probes/lttng-tracepoint-event-impl.h
index 61f1c2d..2a8fe58 100644
--- a/probes/lttng-tracepoint-event-impl.h
+++ b/probes/lttng-tracepoint-event-impl.h
@@ -34,6 +34,7 @@
 #include <wrapper/rcu.h>
 #include <lttng-events.h>
 #include <lttng-tracer-core.h>
+#include <tp-mempool.h>
 
 #define __LTTNG_NULL_STRING	"(null)"

diff --git a/tp-mempool.c b/tp-mempool.c
new file mode 100644
index 0000000..38df26f
--- /dev/null
+++ b/tp-mempool.c
@@ -0,0 +1,175 @@
+/*
+ * tp-mempool.c
+ *
+ * Copyright (C) 2018 Julien Desfossez <[email protected]>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/slab.h>
+#include <linux/percpu.h>
+
+#include <tp-mempool.h>
+
+#define BUF_SIZE 4 * 1024
+#define NR_BUF_PER_CPU 4
+
+struct tp_buf_entry {
+	int cpu; /* To make sure we return the entry to the right pool. */
+	char buf[BUF_SIZE];
+	struct list_head list;
+};
+
+struct per_cpu_buf {
+	struct list_head free_list; /* Free struct tp_buf_entry. */
+	struct list_head allocated_list; /* Allocated struct tp_buf_entry. */
+};
+
+static struct per_cpu_buf __percpu *pool = NULL; /* Per-cpu buffer. */
+
+int tp_mempool_init(void)
+{
+	int ret, cpu;
+
+	/* The pool is only supposed to be allocated once. */
+	if (pool) {
+		WARN_ON_ONCE(1);
+		ret = -1;
+		goto end;
+	}
+
+	pool = alloc_percpu(struct per_cpu_buf);
+	if (!pool) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	for_each_possible_cpu(cpu) {
+		int i;
+		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
+
+		INIT_LIST_HEAD(&cpu_buf->free_list);
+		INIT_LIST_HEAD(&cpu_buf->allocated_list);
+
+		for (i = 0; i < NR_BUF_PER_CPU; i++) {
+			struct tp_buf_entry *entry;
+
+			entry = kzalloc(sizeof(struct tp_buf_entry),
+					GFP_KERNEL);
+			if (!entry) {
+				ret = -ENOMEM;
+				goto error_free_pool;
+			}
+			entry->cpu = cpu;
+			list_add_tail(&entry->list, &cpu_buf->free_list);
+		}
+	}
+
+	ret = 0;
+	goto end;
+
+error_free_pool:
+	tp_mempool_destroy();
+end:
+	return ret;
+}
+
+void tp_mempool_destroy(void)
+{
+	int cpu;
+
+	if (!pool) {
+		return;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
+		struct tp_buf_entry *entry, *tmp;
+
+		list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		if (!list_empty(&cpu_buf->allocated_list)) {
+			printk(KERN_WARNING "TP allocated memory pool not empty");
+			list_for_each_entry_safe(entry, tmp,
+					&cpu_buf->allocated_list, list) {
+				list_del(&entry->list);
+				kfree(entry);
+			}
+		}
+	}
+	free_percpu(pool);
+	pool = NULL;
+}
+
+void *tp_mempool_alloc(size_t size)
+{
+	void *ret;
+	struct tp_buf_entry *entry;
+	struct per_cpu_buf *cpu_buf;
+	int cpu = smp_processor_id();
+
+	if (size > BUF_SIZE) {
+		ret = NULL;
+		goto end;
+	}
+
+	cpu_buf = per_cpu_ptr(pool, cpu);
+	if (list_empty(&cpu_buf->free_list)) {
+		ret = NULL;
+		goto end;
+	}
+
+	entry = list_first_entry(&cpu_buf->free_list, struct tp_buf_entry, list);
+	/* Remove the entry from the free list. */
+	list_del(&entry->list);
+	/* Add it to the allocated list for tracking. */
+	list_add_tail(&entry->list, &cpu_buf->allocated_list);
+
+	ret = (void *) entry->buf;
+
+end:
+	return ret;
+}
+
+void tp_mempool_free(void *ptr)
+{
+	struct tp_buf_entry *entry;
+	struct per_cpu_buf *cpu_buf;
+
+	if (!ptr) {
+		goto end;
+	}
+
+	entry = container_of(ptr, struct tp_buf_entry, buf);
+	if (!entry) {
+		goto end;
+	}
+	memset(entry->buf, 0, BUF_SIZE);
+
+	cpu_buf = per_cpu_ptr(pool, entry->cpu);
+	if (!cpu_buf) {
+		goto end;
+	}
+
+	/* Remove the entry from the allocated list. */
+	list_del(&entry->list);
+	/* Add it to the free list. */
+	list_add_tail(&entry->list, &cpu_buf->free_list);
+
+end:
+	return;
+}

diff --git a/tp-mempool.h b/tp-mempool.h
new file mode 100644
index 0000000..93c2239
--- /dev/null
+++ b/tp-mempool.h
@@ -0,0 +1,48 @@
+#ifndef LTTNG_TP_MEMPOOL_H
+#define LTTNG_TP_MEMPOOL_H
+
+/*
+ * tp-mempool.h
+ *
+ * Copyright (C) 2018 Julien Desfossez <[email protected]>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/percpu.h>
+
+/*
+ * Initialize the pool, only performed once.
+ * Returns 0 on success, a negative value on error.
+ */
+int tp_mempool_init(void);
+
+/*
+ * Destroy the pool and free all the memory allocated.
+ */
+void tp_mempool_destroy(void);
+
+/*
+ * Ask for a buffer on the current cpu.
+ * Return a pointer to a buffer on success, NULL on error.
+ */
+void *tp_mempool_alloc(size_t size);
+
+/*
+ * Release the reserved memory.
+ */
+void tp_mempool_free(void *ptr);
+
+#endif /* LTTNG_TP_MEMPOOL_H */
-- 
2.7.4
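A side note on tp_mempool_free(): it receives only the buf pointer that
tp_mempool_alloc() handed out, and uses container_of() to walk back to
the enclosing struct tp_buf_entry, whose cpu field identifies the owning
per-cpu pool. A minimal userspace analogue of that pointer arithmetic,
for illustration only (container_of() is redefined here because the
kernel macro is not available in userspace):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tp_buf_entry {
	int cpu;	/* Owning per-cpu pool. */
	char buf[4096];	/* Buffer handed out to the caller. */
};

int main(void)
{
	struct tp_buf_entry entry = { .cpu = 3 };
	void *ptr = entry.buf;	/* What tp_mempool_alloc() returns. */

	/* What tp_mempool_free() does to recover the entry. */
	struct tp_buf_entry *e = container_of(ptr, struct tp_buf_entry, buf);

	printf("recovered cpu = %d\n", e->cpu); /* prints: recovered cpu = 3 */
	return 0;
}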
