Note that this commit also fixes a few typos in the commented
out portions which were missing closing parentheses in the
original coding style.

Signed-off-by: Tom St Denis <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 176 +++++++++++++++++-----------------
 1 file changed, 88 insertions(+), 88 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8dde83f7bd63..d2f52700d281 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -118,11 +118,11 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring 
*ring)
        struct amdgpu_device *adev = ring->adev;
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_RPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_RPTR2);
        else
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_RPTR3);
 }
 
 /**
@@ -140,11 +140,11 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring 
*ring)
                return adev->wb.wb[ring->wptr_offs];
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_WPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_WPTR2);
        else
-               return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
+               return RREG32_SOC15(VCE, 0, mmVCE_RB_WPTR3);
 }
 
 /**
@@ -166,14 +166,14 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring 
*ring)
        }
 
        if (ring == &adev->vce.ring[0])
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
-                       lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR,
+                            lower_32_bits(ring->wptr));
        else if (ring == &adev->vce.ring[1])
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
-                       lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR2,
+                            lower_32_bits(ring->wptr));
        else
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
-                       lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR3,
+                            lower_32_bits(ring->wptr));
 }
 
 static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
@@ -183,7 +183,7 @@ static int vce_v4_0_firmware_loaded(struct amdgpu_device 
*adev)
        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status =
-                               RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));
+                               RREG32_SOC15(VCE, 0, mmVCE_STATUS);
 
                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
@@ -215,29 +215,29 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device 
*adev,
        size = header->header_size + header->vce_table_size + 
header->uvd_table_size;
 
        /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of 
memory descriptor location */
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), 
lower_32_bits(addr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), 
upper_32_bits(addr));
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 
        /* 2, update vmid of descriptor */
-       data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
+       data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
        data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
        data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 
for MM scheduler */
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 
        /* 3, notify mmsch about the size of this descriptor */
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 
        /* 4, set resp to zero */
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
        /* 5, kick off the initialization and wait until 
VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 
0x10000001);
+       WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 
-       data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
+       data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
        loop = 1000;
        while ((data & 0x10000002) != 0x10000002) {
                udelay(10);
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_MMSCH_VF_MAILBOX_RESP));
+               data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
                loop--;
                if (!loop)
                        break;
@@ -360,27 +360,27 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
 
        ring = &adev->vce.ring[0];
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), 
upper_32_bits(ring->gpu_addr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_LO, ring->gpu_addr);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_SIZE, ring->ring_size / 4);
 
        ring = &adev->vce.ring[1];
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), 
upper_32_bits(ring->gpu_addr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_LO2, ring->gpu_addr);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_SIZE2, ring->ring_size / 4);
 
        ring = &adev->vce.ring[2];
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), 
lower_32_bits(ring->wptr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), 
upper_32_bits(ring->gpu_addr));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_LO3, ring->gpu_addr);
+       WREG32_SOC15(VCE, 0, mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+       WREG32_SOC15(VCE, 0, mmVCE_RB_SIZE3, ring->ring_size / 4);
 
        vce_v4_0_mc_resume(adev);
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 
VCE_STATUS__JOB_BUSY_MASK,
@@ -602,44 +602,44 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, 
~0xFF9FF000);
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, 
~0x3F);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
+       WREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_B, 0x1FF);
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_SWAP_CNTL, 0);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_SWAP_CNTL1, 0);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_VM_CTRL, 0);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-               WREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-                       (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 
8));
-               WREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-                       (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 
40) & 0xff);
+               WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0,
+                            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr 
>> 8));
+               WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0,
+                            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr 
>> 40) & 0xff);
        } else {
-               WREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-                       (adev->vce.gpu_addr >> 8));
-               WREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-                       (adev->vce.gpu_addr >> 40) & 0xff);
+               WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0,
+                            (adev->vce.gpu_addr >> 8));
+               WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0,
+                            (adev->vce.gpu_addr >> 40) & 0xff);
        }
 
        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V4_0_FW_SIZE;
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & 
~0x0f000000);
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0, offset & ~0x0f000000);
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_SIZE0, size);
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), 
(adev->vce.gpu_addr >> 8));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), 
(adev->vce.gpu_addr >> 40) & 0xff);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, 
(adev->vce.gpu_addr >> 8));
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1, 
(adev->vce.gpu_addr >> 40) & 0xff);
        offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + 
size : 0;
        size = VCE_V4_0_STACK_SIZE;
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & 
~0x0f000000) | (1 << 24));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1, (offset & ~0x0f000000) | 
(1 << 24));
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_SIZE1, size);
 
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), 
(adev->vce.gpu_addr >> 8));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), 
(adev->vce.gpu_addr >> 40) & 0xff);
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, 
(adev->vce.gpu_addr >> 8));
+       WREG32_SOC15(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2, 
(adev->vce.gpu_addr >> 40) & 0xff);
        offset += size;
        size = VCE_V4_0_DATA_SIZE;
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & 
~0x0f000000) | (2 << 24));
-       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2, (offset & ~0x0f000000) | 
(2 << 24));
+       WREG32_SOC15(VCE, 0, mmVCE_VCPU_CACHE_SIZE2, size);
 
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
        WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
@@ -704,12 +704,12 @@ static bool vce_v4_0_check_soft_reset(void *handle)
         */
        mutex_lock(&adev->grbm_idx_mutex);
        WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
-       if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & 
AMDGPU_VCE_STATUS_BUSY_MASK) {
+       if (RREG32_SOC15(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 
SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 
SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
        WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
-       if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & 
AMDGPU_VCE_STATUS_BUSY_MASK) {
+       if (RREG32_SOC15(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 
SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 
SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
@@ -785,14 +785,14 @@ static void vce_v4_0_override_vce_clock_gating(struct 
amdgpu_device *adev, bool
 {
        u32 tmp, data;
 
-       tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
+       tmp = data = RREG32_SOC15(VCE, 0, mmVCE_RB_ARB_CTRL);
        if (override)
                data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
        else
                data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
 
        if (tmp != data)
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
+               WREG32_SOC15(VCE, 0, mmVCE_RB_ARB_CTRL, data);
 }
 
 static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
@@ -809,55 +809,55 @@ static void vce_v4_0_set_vce_sw_clock_gating(struct 
amdgpu_device *adev,
           fly as necessary.
        */
        if (gated) {
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
+               data = RREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_B);
                data |= 0x1ff;
                data &= ~0xef0000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
+               WREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_B, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING);
                data |= 0x3ff000;
                data &= ~0xffc00000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING_2));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING_2);
                data |= 0x2;
                data &= ~0x00010000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING_2, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_REG_CLOCK_GATING));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING);
                data |= 0x37f;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_DMA_DCLK_CTRL));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL);
                data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
                        VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
                        VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
                        0x8;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL, data);
        } else {
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
+               data = RREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_B);
                data &= ~0x80010;
                data |= 0xe70008;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
+               WREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_B, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING);
                data |= 0xffc00000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING_2));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING_2);
                data |= 0x10000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING_2, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_REG_CLOCK_GATING));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING);
                data &= ~0xffc00000;
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING, data);
 
-               data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_DMA_DCLK_CTRL));
+               data = RREG32_SOC15(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL);
                data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
                          VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
                          VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
                          0x8);
-               WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), 
data);
+               WREG32_SOC15(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL, data);
        }
        vce_v4_0_override_vce_clock_gating(adev, false);
 }
@@ -899,16 +899,16 @@ static int vce_v4_0_set_clockgating_state(void *handle,
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
-                       uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_CLOCK_GATING_A);
+                       uint32_t data = RREG32_SOC15(VCE, 0, 
mmVCE_CLOCK_GATING_A);
                        data &= ~(0xf | 0xff0);
                        data |= ((0x0 << 0) | (0x04 << 4));
-                       WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, 
data);
+                       WREG32_SOC15(VCE, 0, mmVCE_CLOCK_GATING_A, data);
 
                        /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay 
*/
-                       data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING);
+                       data = RREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING);
                        data &= ~(0xf | 0xff0);
                        data |= ((0x0 << 0) | (0x04 << 4));
-                       WREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_UENC_CLOCK_GATING, data);
+                       WREG32_SOC15(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
                }
 
                vce_v4_0_set_vce_sw_clock_gating(adev, enable);
-- 
2.12.0

_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to