Because DMA channel 0 is now used by the user, remove the credits
limitation from this channel. Without this patch, the channel is
practically unusable due to its very low bandwidth configuration.

Signed-off-by: Oded Gabbay <oded.gab...@gmail.com>
---
 drivers/misc/habanalabs/goya/goya.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index ea979ebd62fb..3c509e19d69d 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1688,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev)
 
        /*
         * Workaround for H2 #HW-23 bug
-        * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
-        * to 16 on KMD DMA
-        * We need to limit only these DMAs because the user can only read
+        * Set DMA max outstanding read requests to 240 on DMA CH 1.
+        * This limitation is still large enough to not affect Gen4 bandwidth.
+        * We need to only limit that DMA channel because the user can only read
         * from Host using DMA CH 1
         */
-       WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
        WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
 
        goya->hw_cap_initialized |= HW_CAP_GOLDEN;
@@ -3693,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
         * WA for HW-23.
         * We can't allow user to read from Host using QMANs other than 1.
         */
-       if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+       if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
                hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
                                le32_to_cpu(user_dma_pkt->tsize),
                                hdev->asic_prop.va_space_host_start_address,
-- 
2.17.1
