to the user buffer, then wake up the SPM buffer wait queue to return the
data to user space.

Signed-off-by: James Zhu <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c | 118 +++++++++++++++++++++++-
 1 file changed, 117 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
index d84814069846..7acbd966cd3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
@@ -59,20 +59,136 @@ struct amdgpu_spm_cntr {
        u32   have_users_buf_cnt;
        bool  are_users_buf_filled;
 };
+
 static int amdgpu_spm_release(struct amdgpu_spm_mgr *spm_mgr, struct drm_file 
*filp);
 static void _amdgpu_spm_release(struct amdgpu_spm_mgr *spm_mgr, int inst, 
struct drm_file *filp);
 
+static int amdgpu_spm_data_copy(struct amdgpu_spm_mgr *spm_mgr, u32 
size_to_copy, int inst)
+{
+       struct amdgpu_spm_base *spm = &(spm_mgr->spm_cntr->spm[inst]);
+       uint64_t __user *user_address;
+       uint64_t *ring_buf;
+       u32 user_buf_space_left;
+       int ret = 0;
+
+       if (spm->ubuf.user_addr == NULL)
+               return -EFAULT;
+
+       user_address = (uint64_t *)((uint64_t)spm->ubuf.user_addr + 
spm->size_copied);
+       /* From RLC spec, ring_rptr = 0 points to spm->cpu_addr + 0x20 */
+       ring_buf =  (uint64_t *)((uint64_t)spm->cpu_addr + spm->ring_rptr + 
0x20);
+
+       if (user_address == NULL)
+               return -EFAULT;
+
+       user_buf_space_left = spm->ubuf.ubufsize - spm->size_copied;
+
+       if (size_to_copy < user_buf_space_left) {
+               ret = copy_to_user(user_address, ring_buf, size_to_copy);
+               if (ret) {
+                       spm->has_data_loss = true;
+                       return -EFAULT;
+               }
+               spm->size_copied += size_to_copy;
+               spm->ring_rptr += size_to_copy;
+       } else {
+               ret = copy_to_user(user_address, ring_buf, user_buf_space_left);
+               if (ret) {
+                       spm->has_data_loss = true;
+                       return -EFAULT;
+               }
+
+               spm->size_copied = spm->ubuf.ubufsize;
+               spm->ring_rptr += user_buf_space_left;
+               spm->is_user_buf_filled = true;
+       }
+
+       return ret;
+}
+
+static int amdgpu_spm_read_ring_buffer(struct amdgpu_spm_mgr *spm_mgr, int 
inst)
+{
+       struct amdgpu_device *adev = mgr_to_adev(spm_mgr, spm_mgr);
+       struct amdgpu_spm_base *spm = &(spm_mgr->spm_cntr->spm[inst]);
+       u32 overflow_size = 0;
+       u32 size_to_copy;
+       int ret = 0;
+       u32 ring_wptr;
+
+       ring_wptr = READ_ONCE(spm->cpu_addr[0]);
+
+       /* SPM might stall if we cannot copy data out of SPM ringbuffer.
+        * spm->has_data_loss is only a hint here since stall is only a
+        * possibility and data loss might not happen. But it is a useful
+        * hint for user mode profiler to take extra actions.
+        */
+       if (!spm->has_user_buf || spm->is_user_buf_filled) {
+               spm->has_data_loss = true;
+               /* set flag due to there is no flag setup
+                * when read ring buffer timeout.
+                */
+               if (!spm->is_user_buf_filled)
+                       spm->is_user_buf_filled = true;
+               dev_dbg(adev->dev, "[SPM#%d] [%d|%d] rptr:0x%x--wptr:0x%x", 
inst,
+                       spm->has_user_buf, spm->is_user_buf_filled, 
spm->ring_rptr, ring_wptr);
+               goto exit;
+       }
+
+       if (spm->ring_rptr == ring_wptr)
+               goto exit;
+
+       if (ring_wptr > spm->ring_rptr) {
+               size_to_copy = ring_wptr - spm->ring_rptr;
+               ret = amdgpu_spm_data_copy(spm_mgr, size_to_copy, inst);
+       } else {
+               size_to_copy = spm->ring_size - spm->ring_rptr;
+               ret = amdgpu_spm_data_copy(spm_mgr, size_to_copy, inst);
+
+               /* correct counter start point */
+               if (spm->ring_size == spm->ring_rptr) {
+                       if (ring_wptr == 0) {
+                               /* reset rptr to start point of ring buffer */
+                               spm->ring_rptr = ring_wptr;
+                               goto exit;
+                       }
+                       spm->ring_rptr = 0;
+                       size_to_copy = ring_wptr - spm->ring_rptr;
+                       if (!ret)
+                               ret = amdgpu_spm_data_copy(spm_mgr, 
size_to_copy, inst);
+               }
+       }
+
+exit:
+       amdgpu_rlc_spm_set_rdptr(adev, inst, spm->ring_rptr);
+       return ret;
+}
 
 static void amdgpu_spm_work(struct work_struct *work)
 {
        struct amdgpu_spm_mgr *spm_mgr = container_of(work, struct 
amdgpu_spm_mgr, spm_work);
+       struct amdgpu_device *adev = mgr_to_adev(spm_mgr, spm_mgr);
        struct mm_struct *mm = NULL;
 
        mm = get_task_mm(spm_mgr->lead_thread);
        if (mm) {
                kthread_use_mm(mm);
                { /* attach mm */
-                       /* TODO: dump spm ring buffer to user buffer */
+                       int inst;
+
+                       mutex_lock(&spm_mgr->spm_cntr->spm_worker_mutex);
+                       WRITE_ONCE(spm_mgr->spm_cntr->are_users_buf_filled, 
false);
+                       for_each_inst(inst, AMDGPU_XCC_MASK(adev)) {
+                               struct amdgpu_spm_base *spm = 
&(spm_mgr->spm_cntr->spm[inst]);
+
+                               amdgpu_spm_read_ring_buffer(spm_mgr, inst);
+                               if (spm->is_user_buf_filled)
+                                       
WRITE_ONCE(spm_mgr->spm_cntr->are_users_buf_filled, true);
+                       }
+                       if (READ_ONCE(spm_mgr->spm_cntr->are_users_buf_filled)) 
{
+                               pr_debug("SPM wake up buffer work queue.");
+                               wake_up(&spm_mgr->spm_cntr->spm_buf_wq);
+                       }
+                       mutex_unlock(&spm_mgr->spm_cntr->spm_worker_mutex);
                } /* detach mm */
                kthread_unuse_mm(mm);
                /* release the mm structure */
-- 
2.34.1

Reply via email to