amdgpu_virt_kiq_reg_write_reg_wait() is hardcoded to use MEC engine 0.
Add an xcc_inst parameter so callers can select a different MEC engine.

Signed-off-by: Victor Lu <victorchengchi...@amd.com>
---
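
Note (illustration only, not part of the change itself): with the new
signature, a hypothetical multi-XCC caller passes the KIQ/XCC index it
has already resolved, while existing single-instance callers pass 0:

	/* hypothetical caller; xcc_inst is an index into adev->gfx.kiq[] */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
					   1 << vmid, xcc_inst);

gmc_v10_0 and gmc_v11_0 keep the old behavior by passing 0; gmc_v9_0
derives the instance from the vmhub, as shown in the hunks below.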
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c |  5 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |  3 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c    | 10 +++++++---
 5 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 96857ae7fb5b..105a1fdff2f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -73,9 +73,10 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
-                                       uint32_t ref, uint32_t mask)
+                                       uint32_t ref, uint32_t mask,
+                                       uint32_t xcc_inst)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index fabb83e9d9ae..e9eb64e11c9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -330,7 +330,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t rreg1,
-                                       uint32_t ref, uint32_t mask);
+                                       uint32_t ref, uint32_t mask,
+                                       uint32_t xcc_inst);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index fa87a85e1017..56f50abcf9e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -346,7 +346,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid);
+                               1 << vmid, 0);
 
                up_read(&adev->reset_domain->sem);
                return;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index e3b76fd28d15..3f6626f8c298 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -303,7 +303,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid);
+                               1 << vmid, 0);
                return;
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 11bad5475b31..5b040011bb92 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -815,7 +815,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 {
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned int eng = 17;
-       u32 j, inv_req, inv_req2, tmp;
+       u32 j, inv_req, inv_req2, tmp, inst;
        struct amdgpu_vmhub *hub;
 
        BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
@@ -844,14 +844,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
-       if (adev->gfx.kiq[0].ring.sched.ready &&
+       if (vmhub >= AMDGPU_MMHUB0(0))
+               inst = vmhub - AMDGPU_MMHUB0(0);
+       else
+               inst = vmhub;
+       if (adev->gfx.kiq[inst].ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_domain->sem)) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                                                  1 << vmid);
+                                                  1 << vmid, inst);
                up_read(&adev->reset_domain->sem);
                return;
        }
-- 
2.34.1
