Stop the SPM HW, flush the SPM work queue, wake up all waiters on the
wait queue and free the SPM ring buffer.

Signed-off-by: James Zhu <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c | 52 +++++++++++++++++++++++--
 1 file changed, 49 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
index 4b2c7f7b494b..d84814069846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_spm.c
@@ -180,14 +180,57 @@ static int amdgpu_spm_acquire(struct amdgpu_spm_mgr *spm_mgr, struct drm_file *f
 
 static void _amdgpu_spm_release(struct amdgpu_spm_mgr *spm_mgr, int inst, struct drm_file *filp)
 {
-       /* TODO: */
+       struct amdgpu_device *adev = mgr_to_adev(spm_mgr, spm_mgr);
+       struct amdgpu_spm_base *spm = &(spm_mgr->spm_cntr->spm[inst]);
+       unsigned long flags;
+
+       if (!spm->ring_size)
+               return;
+       amdgpu_rlc_spm_release(adev, inst, drm_priv_to_vm(filp));
+       amdgpu_vm_free_gtt_mem(adev, &(spm->spm_obj));
+
+       spin_lock_irqsave(&spm_mgr->spm_irq_lock, flags);
+       memset(spm, 0, sizeof(*spm));
+       spin_unlock_irqrestore(&spm_mgr->spm_irq_lock, flags);
 
+       --spm_mgr->spm_cntr->spm_use_cnt;
 }
 
 static int amdgpu_spm_release(struct amdgpu_spm_mgr *spm_mgr, struct drm_file *filp)
 {
-       /* TODO */
-       return 0;
+       struct amdgpu_device *adev = mgr_to_adev(spm_mgr, spm_mgr);
+       unsigned long flags;
+       int inst;
+       int ret = 0;
+
+       mutex_lock(&(to_prof_xcp_mgr(spm_mgr, spm_mgr)->mutex));
+       if (!spm_mgr->spm_cntr) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for_each_inst(inst, AMDGPU_XCC_MASK(adev)) {
+               spin_lock_irqsave(&spm_mgr->spm_irq_lock, flags);
+               spm_mgr->spm_cntr->spm[inst].is_spm_started = false;
+               spin_unlock_irqrestore(&spm_mgr->spm_irq_lock, flags);
+               amdgpu_rlc_spm_cntl(adev, inst, 0);
+       }
+       flush_work(&spm_mgr->spm_work);
+       wake_up_all(&spm_mgr->spm_cntr->spm_buf_wq);
+
+       for_each_inst(inst, AMDGPU_XCC_MASK(adev))
+               _amdgpu_spm_release(spm_mgr, inst, filp);
+       amdgpu_vmid_free_reserved(adev, drm_priv_to_vm(filp), AMDGPU_GFXHUB(0));
+
+       spin_lock_irqsave(&spm_mgr->spm_irq_lock, flags);
+       mutex_destroy(&(spm_mgr->spm_cntr->spm_worker_mutex));
+       kfree(spm_mgr->spm_cntr);
+       spm_mgr->spm_cntr = NULL;
+       spin_unlock_irqrestore(&spm_mgr->spm_irq_lock, flags);
+
+out:
+       mutex_unlock(&(to_prof_xcp_mgr(spm_mgr, spm_mgr)->mutex));
+       return ret;
 }
 
 static int spm_update_dest_info(struct amdgpu_spm_mgr *spm_mgr,
@@ -394,6 +437,9 @@ int amdgpu_spm_ioctl(struct drm_device *dev, void __user *data,
        case AMDGPU_SPM_OP_ACQUIRE:
                return  amdgpu_spm_acquire(spm_mgr, filp);
 
+       case AMDGPU_SPM_OP_RELEASE:
+               return  amdgpu_spm_release(spm_mgr, filp);
+
        case AMDGPU_SPM_OP_SET_DEST_BUF:
                return  amdgpu_set_dest_buffer(spm_mgr, data);
 
-- 
2.34.1

Reply via email to