Replace adev->srbm_mutex with a spinlock, adev->srbm_lock
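
Unlike a mutex, a spinlock can also be taken from atomic context. All of
the critical sections converted here are short banked-register sequences
that at most busy-wait with udelay(), so they are safe under a spinlock;
the trade-off is that nothing which can sleep may be called while
srbm_lock is held.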

v2: rebased on 4.12 and included gfx9
Signed-off-by: Andres Rodriguez <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h               |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c        |  2 +-
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c             |  4 +--
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c             | 20 ++++++-------
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c             | 34 +++++++++++------------
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c             | 24 ++++++++--------
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c            |  4 +--
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c            |  4 +--
 10 files changed, 51 insertions(+), 51 deletions(-)
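
For reference, a minimal sketch of the locking pattern after this change
(illustrative only; vi_srbm_select() is the VI variant, the CIK and SOC15
paths use cik_srbm_select()/soc15_grbm_select() the same way):

	spin_lock(&adev->srbm_lock);		/* was: mutex_lock(&adev->srbm_mutex) */
	vi_srbm_select(adev, me, pipe, queue, vmid);	/* select the register bank */
	WREG32(mmSH_MEM_CONFIG, sh_mem_config);	/* banked access; must not sleep */
	vi_srbm_select(adev, 0, 0, 0, 0);	/* restore the default bank */
	spin_unlock(&adev->srbm_lock);		/* was: mutex_unlock(&adev->srbm_mutex) */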

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a9b7a61..68350ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1465,41 +1465,41 @@ struct amdgpu_device {
        enum amd_asic_type              asic_type;
        uint32_t                        family;
        uint32_t                        rev_id;
        uint32_t                        external_rev_id;
        unsigned long                   flags;
        int                             usec_timeout;
        const struct amdgpu_asic_funcs  *asic_funcs;
        bool                            shutdown;
        bool                            need_dma32;
        bool                            accel_working;
        struct work_struct              reset_work;
        struct notifier_block           acpi_nb;
        struct amdgpu_i2c_chan          *i2c_bus[AMDGPU_MAX_I2C_BUS];
        struct amdgpu_debugfs           debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
        unsigned                        debugfs_count;
 #if defined(CONFIG_DEBUG_FS)
        struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
        struct amdgpu_atif              atif;
        struct amdgpu_atcs              atcs;
-       struct mutex                    srbm_mutex;
+       spinlock_t                      srbm_lock;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
        struct mutex                    grbm_idx_mutex;
        struct dev_pm_domain            vga_pm_domain;
        bool                            have_disp_power_ref;
 
        /* BIOS */
        bool                            is_atom_fw;
        uint8_t                         *bios;
        uint32_t                        bios_size;
        struct amdgpu_bo                *stollen_vga_memory;
        uint32_t                        bios_scratch_reg_offset;
        uint32_t                        bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
        /* Register/doorbell mmio */
        resource_size_t                 rmmio_base;
        resource_size_t                 rmmio_size;
        void __iomem                    *rmmio;
        /* protects concurrent MM_INDEX/DATA based register access */
        spinlock_t mmio_idx_lock;
        /* protects concurrent SMC based register access */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5254562..a009990 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -152,50 +152,50 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .write_vmid_invalidate_request = write_vmid_invalidate_request,
        .get_fw_version = get_fw_version
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
 
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
        return (struct amdgpu_device *)kgd;
 }
 
 static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        WREG32(mmSRBM_GFX_CNTL, value);
 }
 
 static void unlock_srbm(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        WREG32(mmSRBM_GFX_CNTL, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
        lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
 
 static void release_queue(struct kgd_dev *kgd)
 {
        unlock_srbm(kgd);
 }
 
 static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 133d066..9698a7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -113,50 +113,50 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .write_vmid_invalidate_request = write_vmid_invalidate_request,
        .get_fw_version = get_fw_version
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
 
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
        return (struct amdgpu_device *)kgd;
 }
 
 static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        WREG32(mmSRBM_GFX_CNTL, value);
 }
 
 static void unlock_srbm(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        WREG32(mmSRBM_GFX_CNTL, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
        lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
 
 static void release_queue(struct kgd_dev *kgd)
 {
        unlock_srbm(kgd);
 }
 
 static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2acceef..674256a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1841,41 +1841,41 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
        adev->didt_wreg = &amdgpu_invalid_wreg;
        adev->gc_cac_rreg = &amdgpu_invalid_rreg;
        adev->gc_cac_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
 
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 
0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
        atomic_set(&adev->irq.ih.lock, 0);
        mutex_init(&adev->firmware.mutex);
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
-       mutex_init(&adev->srbm_mutex);
+       spin_lock_init(&adev->srbm_lock);
        mutex_init(&adev->grbm_idx_mutex);
        mutex_init(&adev->mn_lock);
        hash_init(adev->mn_hash);
 
        amdgpu_check_arguments(adev);
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        spin_lock_init(&adev->mmio_idx_lock);
        spin_lock_init(&adev->smc_idx_lock);
        spin_lock_init(&adev->pcie_idx_lock);
        spin_lock_init(&adev->uvd_ctx_idx_lock);
        spin_lock_init(&adev->didt_idx_lock);
        spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
        spin_lock_init(&adev->mm_stats.lock);
 
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c216e16..fe462ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -372,50 +372,50 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 /**
  * cik_sdma_gfx_resume - setup and start the async dma engines
  *
  * @adev: amdgpu_device pointer
  *
  * Set up the gfx DMA ring buffers and enable them (CIK).
  * Returns 0 for success, error for failure.
  */
 static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        int i, j, r;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                for (j = 0; j < 16; j++) {
                        cik_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                        /* XXX SDMA RLC - todo */
                }
                cik_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);
 
                WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
                rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
                        SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 #endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 85321d6..9e788e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1845,51 +1845,51 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
  */
 #define DEFAULT_SH_MEM_BASES   (0x6000)
 #define FIRST_COMPUTE_VMID     (8)
 #define LAST_COMPUTE_VMID      (16)
 static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 {
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
 
        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
        */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
        sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
        sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                cik_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_config);
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_MEM_BASES, sh_mem_bases);
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
 {
        adev->gfx.config.double_offchip_lds_buf = 1;
 }
 
 /**
  * gfx_v7_0_gpu_init - setup the 3D engine
  *
  * @adev: amdgpu_device pointer
  *
  * Configures the 3D engine and tiling configuration
  * registers so that the 3D engine is usable.
  */
 static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 {
        u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
        u32 tmp;
        int i;
@@ -1918,56 +1918,56 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
         */
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
        sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
                                   MTYPE_NC);
        sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
                                   MTYPE_UC);
        sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
 
        sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
                                   SWIZZLE_ENABLE, 1);
        sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
                                   ELEMENT_SIZE, 1);
        sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
                                   INDEX_STRIDE, 3);
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
                if (i == 0)
                        sh_mem_base = 0;
                else
                        sh_mem_base = adev->mc.shared_aperture_start >> 48;
                cik_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_MEM_BASES, sh_mem_base);
                WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 
        gmc_v7_0_init_compute_vmid(adev);
 
        WREG32(mmSX_DEBUG_1, 0x20);
 
        WREG32(mmTA_CNTL_AUX, 0x00010000);
 
        tmp = RREG32(mmSPI_CONFIG_CNTL);
        tmp |= 0x03000000;
        WREG32(mmSPI_CONFIG_CNTL, tmp);
 
        WREG32(mmSQ_CONFIG, 1);
 
        WREG32(mmDB_DEBUG, 0);
 
        tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
        tmp |= 0x00000400;
        WREG32(mmDB_DEBUG2, tmp);
 
        tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
@@ -2950,60 +2950,60 @@ struct hqd_registers
        u32 cp_hqd_iq_rptr;
        u32 cp_hqd_dequeue_request;
        u32 cp_hqd_dma_offload;
        u32 cp_hqd_sema_cmd;
        u32 cp_hqd_msg_type;
        u32 cp_hqd_atomic0_preop_lo;
        u32 cp_hqd_atomic0_preop_hi;
        u32 cp_hqd_atomic1_preop_lo;
        u32 cp_hqd_atomic1_preop_hi;
        u32 cp_hqd_hq_scheduler0;
        u32 cp_hqd_hq_scheduler1;
        u32 cp_mqd_control;
 };
 
 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev, int me, int pipe)
 {
        u64 eop_gpu_addr;
        u32 tmp;
        size_t eop_offset = me * pipe * GFX7_MEC_HPD_SIZE * 2;
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
 
        cik_srbm_select(adev, me, pipe, 0, 0);
 
        /* write the EOP addr */
        WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
        WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
 
        /* set the VMID assigned */
        WREG32(mmCP_HPD_EOP_VMID, 0);
 
        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
        tmp = RREG32(mmCP_HPD_EOP_CONTROL);
        tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
        tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
        WREG32(mmCP_HPD_EOP_CONTROL, tmp);
 
        cik_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
 {
        int i;
 
        /* disable the queue if it's active */
        if (RREG32(mmCP_HQD_ACTIVE) & 1) {
                WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
                                break;
                        udelay(1);
                }
 
                if (i == adev->usec_timeout)
                        return -ETIMEDOUT;
 
                WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
                WREG32(mmCP_HQD_PQ_RPTR, 0);
@@ -3176,49 +3176,49 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
                        return r;
                }
        }
 
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0))
                goto out;
 
        r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
                        &mqd_gpu_addr);
        if (r) {
                dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
                goto out_unreserve;
        }
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
        if (r) {
                dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
                goto out_unreserve;
        }
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 
        gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
        gfx_v7_0_mqd_deactivate(adev);
        gfx_v7_0_mqd_commit(adev, mqd);
 
        cik_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 
        amdgpu_bo_kunmap(ring->mqd_obj);
 out_unreserve:
        amdgpu_bo_unreserve(ring->mqd_obj);
 out:
        return 0;
 }
 
 /**
  * gfx_v7_0_cp_compute_resume - setup the compute queue registers
  *
  * @adev: amdgpu_device pointer
  *
  * Program the compute queues and test them to make sure they
  * are working.
  * Returns 0 for success, error for failure.
  */
 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 {
        int r, i, j;
@@ -5054,48 +5054,48 @@ static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 }
 
 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
 {
        /* Me 0 is for graphics and Me 2 is reserved for HW scheduling
         * So we should only really be configuring ME 1 i.e. MEC0
         */
        if (me != 1) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid 
me:%d\n", me);
                return;
        }
 
        if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid "
                                "me:%d pipe:%d\n", pipe, me);
                return;
        }
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        cik_srbm_select(adev, me, pipe, 0, 0);
 
        WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
                        state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        cik_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
 {
        u32 cp_int_cntl;
 
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b9e0ded..6b9c7f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3923,116 +3923,116 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
 
        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
         */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
 
        sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
                        SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
                        MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
                        SH_MEM_CONFIG__PRIVATE_ATC_MASK;
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                vi_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_config);
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_MEM_BASES, sh_mem_bases);
        }
        vi_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static void gfx_v8_0_config_init(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
        default:
                adev->gfx.config.double_offchip_lds_buf = 1;
                break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                adev->gfx.config.double_offchip_lds_buf = 0;
                break;
        }
 }
 
 static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 {
        u32 tmp, sh_static_mem_cfg;
        int i;
 
        WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
        WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
 
        gfx_v8_0_tiling_mode_table_init(adev);
        gfx_v8_0_setup_rb(adev);
        gfx_v8_0_get_cu_info(adev);
        gfx_v8_0_config_init(adev);
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
                                   SWIZZLE_ENABLE, 1);
        sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
                                   ELEMENT_SIZE, 1);
        sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
                                   INDEX_STRIDE, 3);
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
                vi_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                if (i == 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                        WREG32(mmSH_MEM_BASES, 0);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                        tmp = adev->mc.shared_aperture_start >> 48;
                        WREG32(mmSH_MEM_BASES, tmp);
                }
 
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
        }
        vi_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 
        gfx_v8_0_init_compute_vmid(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        /*
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
        gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32(mmPA_SC_FIFO_SIZE,
                   (adev->gfx.config.sc_prim_fifo_size_frontend <<
                        PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_prim_fifo_size_backend <<
                        PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_hiz_tile_fifo_size <<
                        PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
                        PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
 
@@ -5075,87 +5075,87 @@ int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
 }
 
 static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 {
        int r = 0;
        struct amdgpu_device *adev = ring->adev;
        struct vi_mqd *mqd = ring->mqd_ptr;
        int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
 
        gfx_v8_0_kiq_setting(ring);
 
        if (adev->gfx.in_reset) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 
                /* reset ring buffer */
                ring->wptr = 0;
                amdgpu_ring_clear_ring(ring);
 
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                r = gfx_v8_0_deactivate_hqd(adev, 1);
                if (r) {
                        dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
                        goto out_unlock;
                }
                gfx_v8_0_mqd_commit(adev, mqd);
                vi_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
        } else {
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                gfx_v8_0_mqd_init(ring);
                r = gfx_v8_0_deactivate_hqd(adev, 1);
                if (r) {
                        dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
                        goto out_unlock;
                }
                gfx_v8_0_mqd_commit(adev, mqd);
                vi_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        }
 
        return r;
 
 out_unlock:
        vi_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
        return r;
 }
 
 static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        struct vi_mqd *mqd = ring->mqd_ptr;
        int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
        if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                gfx_v8_0_mqd_init(ring);
                vi_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 
                /* reset ring buffer */
                ring->wptr = 0;
                amdgpu_ring_clear_ring(ring);
        }
 
        return 0;
 }
 
 static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
 {
        if (adev->asic_type > CHIP_TONGA) {
                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
@@ -5450,45 +5450,45 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
        srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
        /* stop the rlc */
        gfx_v8_0_rlc_stop(adev);
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
                /* Disable GFX parsing/prefetching */
                gfx_v8_0_cp_gfx_enable(adev, false);
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
                int i;
 
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
-                       mutex_lock(&adev->srbm_mutex);
+                       spin_lock(&adev->srbm_lock);
                        vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                        gfx_v8_0_deactivate_hqd(adev, 2);
                        vi_srbm_select(adev, 0, 0, 0, 0);
-                       mutex_unlock(&adev->srbm_mutex);
+                       spin_unlock(&adev->srbm_lock);
                }
                /* Disable MEC parsing/prefetching */
                gfx_v8_0_cp_compute_enable(adev, false);
        }
 
        return 0;
 }
 
 static int gfx_v8_0_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;
 
        if ((!adev->gfx.grbm_soft_reset) &&
            (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
        srbm_soft_reset = adev->gfx.srbm_soft_reset;
@@ -5550,45 +5550,45 @@ static int gfx_v8_0_post_soft_reset(void *handle)
        if ((!adev->gfx.grbm_soft_reset) &&
            (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
        srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
                gfx_v8_0_cp_gfx_resume(adev);
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
                int i;
 
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
-                       mutex_lock(&adev->srbm_mutex);
+                       spin_lock(&adev->srbm_lock);
                        vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                        gfx_v8_0_deactivate_hqd(adev, 2);
                        vi_srbm_select(adev, 0, 0, 0, 0);
-                       mutex_unlock(&adev->srbm_mutex);
+                       spin_unlock(&adev->srbm_lock);
                }
                gfx_v8_0_kiq_resume(adev);
        }
        gfx_v8_0_rlc_start(adev);
 
        return 0;
 }
 
 /**
  * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
  *
  * @adev: amdgpu_device pointer
  *
  * Fetches a GPU clock counter snapshot.
  * Returns the 64 bit clock counter snapshot.
  */
 static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
 
@@ -6794,48 +6794,48 @@ static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
        WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
                     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
 {
        /* Me 0 is reserved for graphics */
        if (me < 1 || me > adev->gfx.mec.num_mec) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid 
me:%d\n", me);
                return;
        }
 
        if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid "
                                "me:%d pipe:%d\n", pipe, me);
                return;
        }
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        vi_srbm_select(adev, me, pipe, 0, 0);
 
        WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
                        state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        vi_srbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
 {
        WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
                     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        return 0;
 }
 
 static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
 {
        WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
                     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 57511cc..05fb51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1362,78 +1362,78 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
 #define FIRST_COMPUTE_VMID     (8)
 #define LAST_COMPUTE_VMID      (16)
 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
 {
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
 
        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
         */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
 
        sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
                WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
        }
        soc15_grbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 {
        u32 tmp;
        int i;
 
        WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
 
        gfx_v9_0_tiling_mode_table_init(adev);
 
        gfx_v9_0_setup_rb(adev);
        gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        for (i = 0; i < 16; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                tmp = 0;
                tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
                WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
        }
        soc15_grbm_select(adev, 0, 0, 0, 0);
 
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 
        gfx_v9_0_init_compute_vmid(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        /*
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
                   (adev->gfx.config.sc_prim_fifo_size_frontend <<
                        PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_prim_fifo_size_backend <<
                        PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_hiz_tile_fifo_size <<
                        PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
                        PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
        mutex_unlock(&adev->grbm_idx_mutex);
@@ -2234,60 +2234,60 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
 
        return 0;
 }
 
 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct v9_mqd *mqd = ring->mqd_ptr;
        bool is_kiq = (ring->funcs->type == AMDGPU_RING_TYPE_KIQ);
        int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
 
        if (is_kiq) {
                gfx_v9_0_kiq_setting(&kiq->ring);
        } else {
                mqd_idx = ring - &adev->gfx.compute_ring[0];
        }
 
        if (!adev->gfx.in_reset) {
                memset((void *)mqd, 0, sizeof(*mqd));
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                gfx_v9_0_mqd_init(ring);
                if (is_kiq)
                        gfx_v9_0_kiq_init_register(ring);
                soc15_grbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
        } else { /* for GPU_RESET case */
                /* reset MQD to a clean status */
 
                /* reset ring buffer */
                ring->wptr = 0;
 
                if (is_kiq) {
-                   mutex_lock(&adev->srbm_mutex);
+                   spin_lock(&adev->srbm_lock);
                    soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
                    gfx_v9_0_kiq_init_register(ring);
                    soc15_grbm_select(adev, 0, 0, 0, 0);
-                   mutex_unlock(&adev->srbm_mutex);
+                   spin_unlock(&adev->srbm_lock);
                }
        }
 
        if (is_kiq)
                gfx_v9_0_kiq_enable(ring);
        else
                gfx_v9_0_map_queue_enable(&kiq->ring, ring);
 
        return 0;
 }
 
 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring = NULL;
        int r = 0, i;
 
        gfx_v9_0_cp_compute_enable(adev, true);
 
        ring = &adev->gfx.kiq.ring;
 
@@ -3316,48 +3316,48 @@ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                break;
        }
 }
 
 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
 {
        /* Me 0 is reserved for graphics */
        if (me < 1 || me > adev->gfx.mec.num_mec) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid 
me:%d\n", me);
                return;
        }
 
        if (pipe >= adev->gfx.mec.num_pipe_per_mec) {
                DRM_ERROR("Ignoring request to enable interrupts for invalid "
                                "me:%d pipe:%d\n", pipe, me);
                return;
        }
 
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        soc15_grbm_select(adev, me, pipe, 0, 0);
 
        WREG32_FIELD(CPC_INT_CNTL, TIME_STAMP_INT_ENABLE,
                        state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
        soc15_grbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 }
 
 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
 {
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                               PRIV_REG_INT_ENABLE,
                               state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
        default:
                break;
        }
 
        return 0;
 }
@@ -3839,41 +3839,41 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
                return r;
        }
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
        if (r) {
                dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
                gfx_v9_0_cp_compute_fini(adev);
                return r;
        }
 
        /* init the mqd struct */
        memset(buf, 0, sizeof(struct v9_mqd));
 
        mqd = (struct v9_mqd *)buf;
        mqd->header = 0xC0310800;
        mqd->compute_pipelinestat_enable = 0x00000001;
        mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
        mqd->compute_misc_reserved = 0x00000003;
-       mutex_lock(&adev->srbm_mutex);
+       spin_lock(&adev->srbm_lock);
        soc15_grbm_select(adev, ring->me,
                               ring->pipe,
                               ring->queue, 0);
        /* disable wptr polling */
        WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
 
        /* write the EOP addr */
        BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
        eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring->queue * GFX9_MEC_HPD_SIZE);
        eop_gpu_addr >>= 8;
 
        WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, lower_32_bits(eop_gpu_addr));
        WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
        mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_gpu_addr);
        mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_gpu_addr);
 
        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
        tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
                                    (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
@@ -3983,39 +3983,39 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
                mqd->cp_hqd_pq_doorbell_control);
 
        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
        WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
        WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);
 
        /* set the vmid for the queue */
        mqd->cp_hqd_vmid = 0;
        WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
        WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, tmp);
        mqd->cp_hqd_persistent_state = tmp;
 
        /* activate the queue */
        mqd->cp_hqd_active = 1;
        WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
 
        soc15_grbm_select(adev, 0, 0, 0, 0);
-       mutex_unlock(&adev->srbm_mutex);
+       spin_unlock(&adev->srbm_lock);
 
        amdgpu_bo_kunmap(ring->mqd_obj);
        amdgpu_bo_unreserve(ring->mqd_obj);
 
        if (use_doorbell)
                WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
 
        return 0;
 }
 
 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gfx_v9_0_ip_funcs,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f2d0710..0e0e344 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -405,49 +405,49 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
 /**
  * sdma_v2_4_gfx_resume - setup and start the async dma engines
  *
  * @adev: amdgpu_device pointer
  *
  * Set up the gfx DMA ring buffers and enable them (VI).
  * Returns 0 for success, error for failure.
  */
 static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        int i, j, r;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);
 
                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 #ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
 #endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a69e5d4..f8a5da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -599,49 +599,49 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
  *
  * @adev: amdgpu_device pointer
  *
  * Set up the gfx DMA ring buffers and enable them (VI).
  * Returns 0 for success, error for failure.
  */
 static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        u32 doorbell;
        int i, j, r;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                amdgpu_ring_clear_ring(ring);
                wb_offset = (ring->rptr_offs * 4);
 
-               mutex_lock(&adev->srbm_mutex);
+               spin_lock(&adev->srbm_lock);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
-               mutex_unlock(&adev->srbm_mutex);
+               spin_unlock(&adev->srbm_lock);
 
                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);
 
                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 #ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
 #endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
-- 
2.9.3
