This patch uses the PID of the process being traced to allocate and free
ETR memory buffers for CPU-wide scenarios.  The implementation is tailored
to handle both N:1 and 1:1 source/sink HW topologies.

Signed-off-by: Mathieu Poirier <mathieu.poir...@linaro.org>
---
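A note for reviewers: the allocation path below boils down to a PID-keyed
get-or-create on an IDR.  Here is a minimal sketch of that pattern in
isolation, with illustrative names only -- session_idr, session_mutex,
struct trace_buf and get_session_buf do not exist in the driver, which
instead keys drvdata->idr by task_pid_nr(event->owner) and allocates with
alloc_etr_buf().

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

static DEFINE_IDR(session_idr);
static DEFINE_MUTEX(session_mutex);

struct trace_buf {
        refcount_t refcount;
        /* Hardware buffer details elided. */
};

static struct trace_buf *get_session_buf(pid_t pid)
{
        struct trace_buf *buf;
        int ret;

retry:
        /* Fast path: a peer event already published a buffer for this PID. */
        mutex_lock(&session_mutex);
        buf = idr_find(&session_idr, pid);
        if (buf) {
                refcount_inc(&buf->refcount);
                mutex_unlock(&session_mutex);
                return buf;
        }
        mutex_unlock(&session_mutex);

        /* Slow path: allocate outside the lock, then try to publish. */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
        refcount_set(&buf->refcount, 1);

        mutex_lock(&session_mutex);
        /* The range [pid, pid + 1) admits exactly one entry per session. */
        ret = idr_alloc(&session_idr, buf, pid, pid + 1, GFP_KERNEL);
        mutex_unlock(&session_mutex);

        /* Lost the race to a peer: drop ours and pick up the winner's. */
        if (ret == -ENOSPC) {
                kfree(buf);
                goto retry;
        }
        if (ret < 0) {
                kfree(buf);
                return ERR_PTR(ret);
        }

        return buf;
}
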
 .../hwtracing/coresight/coresight-tmc-etr.c   | 106 +++++++++++++++++-
 1 file changed, 103 insertions(+), 3 deletions(-)

diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 1346474ac019..61110ef41d00 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -8,6 +8,8 @@
 #include <linux/coresight.h>
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -26,6 +28,7 @@ struct etr_flat_buf {
 
 /*
  * etr_perf_buffer - Perf buffer used for ETR
+ * @drvdata            - The ETR drvdata this buffer has been allocated for.
  * @etr_buf            - Actual buffer used by the ETR
  * @pid                        - The PID this etr_perf_buffer belongs to.
  * @snaphost           - Perf session mode
@@ -34,6 +37,7 @@ struct etr_flat_buf {
  * @pages              - Array of Pages in the ring buffer.
  */
 struct etr_perf_buffer {
+       struct tmc_drvdata      *drvdata;
        struct etr_buf          *etr_buf;
        pid_t                   pid;
        bool                    snapshot;
@@ -1210,6 +1214,71 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
        return etr_buf;
 }
 
+static struct etr_buf *
+get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
+                         struct perf_event *event, int nr_pages,
+                         void **pages, bool snapshot)
+{
+       int ret;
+       pid_t pid = task_pid_nr(event->owner);
+       struct etr_buf *etr_buf;
+
+retry:
+       /*
+        * An etr_perf_buffer is associated with an event and holds a reference
+        * to the AUX ring buffer that was created for that event.  In CPU-wide
+        * N:1 mode multiple events (one per CPU), each with its own AUX ring
+        * buffer, share a sink.  As such an etr_perf_buffer is created for each
+        * event but a single etr_buf associated with the ETR is shared between
+        * them.  The last event in a trace session will copy the content of the
+        * etr_buf to its AUX ring buffer.  Ring buffers associated with other
+        * events are simply not used and freed as the events are destroyed.
+        * We still need to allocate a ring buffer for each event since we
+        * don't know which event will be last.
+        */
+
+       /*
+        * The first thing to do here is check if an etr_buf has already been
+        * allocated for this session.  If so it is shared with this event,
+        * otherwise it is created.
+        */
+       mutex_lock(&drvdata->idr_mutex);
+       etr_buf = idr_find(&drvdata->idr, pid);
+       if (etr_buf) {
+               refcount_inc(&etr_buf->refcount);
+               mutex_unlock(&drvdata->idr_mutex);
+               return etr_buf;
+       }
+
+       /* If we made it here, no buffer has been allocated; do so now. */
+       mutex_unlock(&drvdata->idr_mutex);
+
+       etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+       if (IS_ERR(etr_buf))
+               return etr_buf;
+
+       refcount_set(&etr_buf->refcount, 1);
+
+       /* Now that we have a buffer, add it to the IDR. */
+       mutex_lock(&drvdata->idr_mutex);
+       ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
+       mutex_unlock(&drvdata->idr_mutex);
+
+       /* Another event with this session ID raced us to it; use its buffer. */
+       if (ret == -ENOSPC) {
+               tmc_free_etr_buf(etr_buf);
+               goto retry;
+       }
+
+       /* The IDR can't allocate room for a new session, abandon ship. */
+       if (ret == -ENOMEM) {
+               tmc_free_etr_buf(etr_buf);
+               return ERR_PTR(ret);
+       }
+
+       return etr_buf;
+}
+
 static struct etr_buf *
 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
                            struct perf_event *event, int nr_pages,
@@ -1238,7 +1308,8 @@ get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
                return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
                                                   pages, snapshot);
 
-       return ERR_PTR(-ENOENT);
+       return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
+                                        pages, snapshot);
 }
 
 static struct etr_perf_buffer *
@@ -1265,7 +1336,13 @@ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
        return ERR_PTR(-ENOMEM);
 
 done:
+       /*
+        * Keep a reference to the ETR this buffer has been allocated for
+        * in order to have access to the IDR in tmc_free_etr_buffer().
+        */
+       etr_perf->drvdata = drvdata;
        etr_perf->etr_buf = etr_buf;
+
        return etr_perf;
 }
 
@@ -1295,9 +1372,33 @@ static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
 static void tmc_free_etr_buffer(void *config)
 {
        struct etr_perf_buffer *etr_perf = config;
+       struct tmc_drvdata *drvdata = etr_perf->drvdata;
+       struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;
+
+       if (!etr_buf)
+               goto free_etr_perf_buffer;
+
+       mutex_lock(&drvdata->idr_mutex);
+       /* If we are not the last one to use the buffer, don't touch it. */
+       if (!refcount_dec_and_test(&etr_buf->refcount)) {
+               mutex_unlock(&drvdata->idr_mutex);
+               goto free_etr_perf_buffer;
+       }
+
+       /* We are the last one, remove from the IDR and free the buffer. */
+       buf = idr_remove(&drvdata->idr, etr_perf->pid);
+       mutex_unlock(&drvdata->idr_mutex);
+
+       /*
+        * Something went very wrong if the buffer removed from the IDR is
+        * not the one we expected.  Leak it to avoid a use-after-free.
+        */
+       if (buf && WARN_ON(buf != etr_buf))
+               goto free_etr_perf_buffer;
+
+       tmc_free_etr_buf(etr_perf->etr_buf);
 
-       if (etr_perf->etr_buf)
-               tmc_free_etr_buf(etr_perf->etr_buf);
+free_etr_perf_buffer:
        kfree(etr_perf);
 }
 
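For completeness, the matching teardown under the same illustrative names
(and includes) as the sketch above: only the last event to drop its
reference removes the IDR entry and frees the buffer, mirroring
tmc_free_etr_buffer() in the last hunk.

static void put_session_buf(struct trace_buf *buf, pid_t pid)
{
        struct trace_buf *found;

        mutex_lock(&session_mutex);
        /* Not the last user: leave the buffer to the surviving events. */
        if (!refcount_dec_and_test(&buf->refcount)) {
                mutex_unlock(&session_mutex);
                return;
        }

        /* Last user: unpublish before freeing. */
        found = idr_remove(&session_idr, pid);
        mutex_unlock(&session_mutex);

        /*
         * A different buffer under this PID means something went very
         * wrong; leak rather than risk a use-after-free.
         */
        if (found && WARN_ON(found != buf))
                return;

        kfree(buf);
}
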
-- 
2.17.1
