From: Rob Clark <robdcl...@chromium.org>

So we can monitor how many pages are getting preallocated vs. how many
actually get used.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/msm_gpu_trace.h | 14 ++++++++++++++
 drivers/gpu/drm/msm/msm_iommu.c     |  4 ++++
 2 files changed, 18 insertions(+)
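
For reference, a minimal userspace sketch for watching the new event is
below. It assumes tracefs is mounted at /sys/kernel/tracing, and the
"drm_msm_gpu" component of the event path is a guess at the TRACE_SYSTEM
used by msm_gpu_trace.h, so it may need adjusting. It just enables the
event and prints matching lines from trace_pipe (which carry the
"count=..., remaining=..." text from the TP_printk above).

/*
 * Minimal sketch for watching the new tracepoint from userspace.
 * Assumptions: tracefs is mounted at /sys/kernel/tracing, and the
 * event group directory is named "drm_msm_gpu" (this depends on the
 * TRACE_SYSTEM used by msm_gpu_trace.h and may differ).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/drm_msm_gpu/msm_mmu_prealloc_cleanup/enable";
	const char *pipe = "/sys/kernel/tracing/trace_pipe";
	char line[512];
	FILE *f;

	/* Enable just this event (needs root). */
	f = fopen(enable, "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	/* Stream the trace buffer and keep only the prealloc-cleanup
	 * lines, which end with the TP_printk output, e.g.
	 * "msm_mmu_prealloc_cleanup: count=N, remaining=M".
	 */
	f = fopen(pipe, "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "msm_mmu_prealloc_cleanup"))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}

Run as root while the GPU is active; the remaining count relative to
count shows how much of each preallocation went unused.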

diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 7f863282db0d..781bbe5540bd 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -205,6 +205,20 @@ TRACE_EVENT(msm_gpu_preemption_irq,
                TP_printk("preempted to %u", __entry->ring_id)
 );
 
+TRACE_EVENT(msm_mmu_prealloc_cleanup,
+               TP_PROTO(u32 count, u32 remaining),
+               TP_ARGS(count, remaining),
+               TP_STRUCT__entry(
+                       __field(u32, count)
+                       __field(u32, remaining)
+                       ),
+               TP_fast_assign(
+                       __entry->count = count;
+                       __entry->remaining = remaining;
+                       ),
+               TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
+);
+
 #endif
 
 #undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index d04837461c3d..b5d019093380 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/kmemleak.h>
 #include "msm_drv.h"
+#include "msm_gpu_trace.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
@@ -346,6 +347,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
        struct kmem_cache *pt_cache = get_pt_cache(mmu);
        uint32_t remaining_pt_count = p->count - p->ptr;
 
+       if (p->count > 0)
+               trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
        kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
        kvfree(p->pages);
 }
-- 
2.49.0
