From: Jie Zhang <[email protected]>

Add support for the Adreno 612 GPU found in SM6150/QCS615 chipsets. A612 falls under the ADRENO_6XX_GEN1 family and is a cut-down version of the A615 GPU.
A612 has a new IP called the Reduced Graphics Management Unit (RGMU), a small state machine that helps toggle the GX GDSC (connected to the CX rail) to implement the IFPC feature. It does not support any other features of a full-fledged GMU, such as clock control or resource voting to RPMh. So, as with other gmu-wrapper implementations, we need Linux clock driver support to control the GPU core clock and the GPU GX GDSC. This patch skips RGMU core initialization and behaves more like the gmu-wrapper case.

Signed-off-by: Jie Zhang <[email protected]>
Signed-off-by: Akhil P Oommen <[email protected]>
---
 drivers/gpu/drm/msm/adreno/a6xx_catalog.c   | 16 ++++++++++
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c       | 23 ++++++++++++++-
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c       | 45 +++++++++++++++++++++++------
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c |  3 +-
 drivers/gpu/drm/msm/adreno/adreno_gpu.c     |  1 +
 drivers/gpu/drm/msm/adreno/adreno_gpu.h     | 16 ++++++++--
 6 files changed, 90 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 44df6410bce1..5db01fa2ed44 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -705,6 +705,22 @@ static const struct adreno_info a6xx_gpus[] = {
 			{ 157, 3 },
 			{ 127, 4 },
 		),
+	}, {
+		.chip_ids = ADRENO_CHIP_IDS(0x06010200),
+		.family = ADRENO_6XX_GEN1,
+		.fw = {
+			[ADRENO_FW_SQE] = "a630_sqe.fw",
+			[ADRENO_FW_GMU] = "a612_rgmu.bin",
+		},
+		.gmem = (SZ_128K + SZ_4K),
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init = a6xx_gpu_init,
+		.a6xx = &(const struct a6xx_info) {
+			.hwcg = a612_hwcg,
+			.protect = &a630_protect,
+			.gmu_cgc_mode = 0x00000022,
+			.prim_fifo_threshold = 0x00080000,
+		},
 	}, {
 		.chip_ids = ADRENO_CHIP_IDS(0x06010500),
 		.family = ADRENO_6XX_GEN1,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 4e6dc16e4a4c..d9ffe9e93ad9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -350,12 +350,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
 /* Trigger a OOB (out of band) request to the GMU */
 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int ret;
 	u32 val;
 	int request, ack;
 
 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return 0;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return -EINVAL;
 
@@ -395,10 +401,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 /* Clear a pending OOB state in the GMU */
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int bit;
 
 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return;
 
@@ -1900,7 +1912,8 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 	gmu->mmio = NULL;
 	gmu->rscc = NULL;
 
-	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+	if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+	    !adreno_has_rgmu(adreno_gpu)) {
 		a6xx_gmu_memory_free(gmu);
 
 		free_irq(gmu->gmu_irq, gmu);
@@ -1942,6 +1955,13 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	/* Mark legacy for manual SPTPRAC control */
 	gmu->legacy = true;
 
+	/* RGMU requires clocks */
+	ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+	if (ret < 0)
+		goto err_clk;
+
+	gmu->nr_clocks = ret;
+
 	/* Map the GMU registers */
 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
 	if (IS_ERR(gmu->mmio)) {
@@ -1981,6 +2001,7 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 err_mmio:
 	iounmap(gmu->mmio);
+err_clk:
 	/* Drop reference taken in of_find_device_by_node */
 	put_device(gmu->dev);
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b8f8ae940b55..779c1da7c46d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -612,15 +612,26 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 
 	if (adreno_is_a630(adreno_gpu))
 		clock_cntl_on = 0x8aa8aa02;
-	else if (adreno_is_a610(adreno_gpu))
+	else if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu))
 		clock_cntl_on = 0xaaa8aa82;
 	else if (adreno_is_a702(adreno_gpu))
 		clock_cntl_on = 0xaaaaaa82;
 	else
 		clock_cntl_on = 0x8aa8aa82;
 
-	cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
-	cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+	if (adreno_is_a612(adreno_gpu))
+		cgc_delay = 0x11;
+	else if (adreno_is_a615_family(adreno_gpu))
+		cgc_delay = 0x111;
+	else
+		cgc_delay = 0x10111;
+
+	if (adreno_is_a612(adreno_gpu))
+		cgc_hyst = 0x55;
+	else if (adreno_is_a615_family(adreno_gpu))
+		cgc_hyst = 0x555;
+	else
+		cgc_hyst = 0x5555;
 
 	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
 		  state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
@@ -714,6 +725,9 @@ static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
 		cfg->ubwc_swizzle = 0x7;
 	}
 
+	if (adreno_is_a612(gpu))
+		cfg->highest_bank_bit = 14;
+
 	if (adreno_is_a618(gpu))
 		cfg->highest_bank_bit = 14;
 
@@ -1288,7 +1302,7 @@ static int hw_init(struct msm_gpu *gpu)
 		gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
 
 	/* Setting the mem pool size */
-	if (adreno_is_a610(adreno_gpu)) {
+	if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
 	} else if (adreno_is_a702(adreno_gpu)) {
@@ -1321,7 +1335,8 @@ static int hw_init(struct msm_gpu *gpu)
 	a6xx_set_ubwc_config(gpu);
 
 	/* Enable fault detection */
-	if (adreno_is_a730(adreno_gpu) ||
+	if (adreno_is_a612(adreno_gpu) ||
+	    adreno_is_a730(adreno_gpu) ||
 	    adreno_is_a740_family(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
 	else if (adreno_is_a690(adreno_gpu))
@@ -1576,7 +1591,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 	 */
 	gpu->active_submits = 0;
 
-	if (adreno_has_gmu_wrapper(adreno_gpu)) {
+	if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu)) {
 		/* Drain the outstanding traffic on memory buses */
 		a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
@@ -2229,6 +2244,12 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		goto err_bulk_clk;
 
+	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+	if (ret) {
+		clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+		goto err_bulk_clk;
+	}
+
 	if (adreno_is_a619_holi(adreno_gpu))
 		a6xx_sptprac_enable(gmu);
 
@@ -2242,8 +2263,10 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 err_set_opp:
 	mutex_unlock(&a6xx_gpu->gmu.lock);
 
-	if (!ret)
+	if (!ret) {
 		msm_devfreq_resume(gpu);
+		a6xx_llc_activate(a6xx_gpu);
+	}
 
 	return ret;
 }
@@ -2284,6 +2307,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	trace_msm_gpu_suspend(0);
 
+	a6xx_llc_deactivate(a6xx_gpu);
+
 	msm_devfreq_suspend(gpu);
 
 	mutex_lock(&a6xx_gpu->gmu.lock);
@@ -2295,6 +2320,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 		a6xx_sptprac_disable(gmu);
 
 	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
 
 	pm_runtime_put_sync(gmu->gxpd);
 	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
@@ -2673,7 +2699,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
 	else if (is_a7xx)
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
-	else if (adreno_has_gmu_wrapper(adreno_gpu))
+	else if (adreno_has_gmu_wrapper(adreno_gpu) ||
+		 of_device_is_compatible(node, "qcom,adreno-rgmu"))
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
 	else
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
@@ -2689,7 +2716,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
 		priv->gpu_clamp_to_idle = true;
 
-	if (adreno_has_gmu_wrapper(adreno_gpu))
+	if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu))
 		ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
 	else
 		ret = a6xx_gmu_init(a6xx_gpu, node);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 4c7f3c642f6a..838150ff49ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1596,7 +1596,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
 	/* Get the generic state from the adreno core */
 	adreno_gpu_state_get(gpu, &a6xx_state->base);
 
-	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+	if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+	    !adreno_has_rgmu(adreno_gpu)) {
 		a6xx_get_gmu_registers(gpu, a6xx_state);
 
 		a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4b5a4edd0702..71400d8999c9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -1189,6 +1189,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	/* Only handle the core clock when GMU is not in use (or is absent). */
 	if (adreno_has_gmu_wrapper(adreno_gpu) ||
+	    adreno_has_rgmu(adreno_gpu) ||
 	    adreno_gpu->info->family < ADRENO_6XX_GEN1) {
 		/*
 		 * This can only be done before devm_pm_opp_of_add_table(), or
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 390fa6720d9b..25ee6b277fe2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -392,6 +392,16 @@ static inline int adreno_is_a610(const struct adreno_gpu *gpu)
 	return adreno_is_revn(gpu, 610);
 }
 
+static inline int adreno_is_a612(const struct adreno_gpu *gpu)
+{
+	return gpu->info->chip_ids[0] == 0x06010200;
+}
+
+static inline bool adreno_has_rgmu(const struct adreno_gpu *gpu)
+{
+	return adreno_is_a612(gpu);
+}
+
 static inline int adreno_is_a618(const struct adreno_gpu *gpu)
 {
 	return adreno_is_revn(gpu, 618);
@@ -466,9 +476,9 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
 {
 	if (WARN_ON_ONCE(!gpu->info))
 		return false;
-
-	/* TODO: A612 */
-	return adreno_is_a610(gpu) || adreno_is_a702(gpu);
+	return adreno_is_a610(gpu) ||
+	       adreno_is_a612(gpu) ||
+	       adreno_is_a702(gpu);
 }
 
 /* TODO: 615/616 */
-- 
2.51.0
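
As a quick reference while reviewing, here is a minimal, self-contained C sketch of how the chip-id based helpers introduced by the patch are meant to gate the RGMU/gmu-wrapper path. The struct definitions below are simplified stand-ins, not the real adreno_gpu/adreno_info driver types.

/*
 * Simplified sketch, not driver code: A612 is matched purely by chip id
 * (it has no revn), and adreno_has_rgmu() is what routes it onto the
 * existing gmu-wrapper style init and power-management paths.
 */
#include <stdbool.h>
#include <stdio.h>

struct adreno_info {
	const unsigned int *chip_ids;	/* zero-terminated list of chip ids */
};

struct adreno_gpu {
	const struct adreno_info *info;
};

static bool adreno_is_a612(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06010200;
}

static bool adreno_has_rgmu(const struct adreno_gpu *gpu)
{
	/* A612 is currently the only GPU that carries an RGMU */
	return adreno_is_a612(gpu);
}

int main(void)
{
	static const unsigned int a612_ids[] = { 0x06010200, 0 };
	const struct adreno_info info = { .chip_ids = a612_ids };
	const struct adreno_gpu gpu = { .info = &info };

	/* RGMU parts take the gmu-wrapper style init/PM paths */
	printf("rgmu -> gmu-wrapper path: %s\n",
	       adreno_has_rgmu(&gpu) ? "yes" : "no");
	return 0;
}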
