As we are forced to use a global shared context on some PTA-equipped-but-broken GPUs, the fine-grained mutex locking in the current implementation of etnaviv_iommuv2_context_alloc() won't be meaningful anymore.
Make the whole function protected by the global lock, in order to prevent reentrancy when allocating the global shared context. Signed-off-by: Icenowy Zheng <u...@icenowy.me> --- No changes in v2. drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index d664ae29ae209..5654a604c70cf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -272,20 +272,18 @@ etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global) struct etnaviv_iommuv2_context *v2_context; struct etnaviv_iommu_context *context; + mutex_lock(&global->lock); + v2_context = vzalloc(sizeof(*v2_context)); if (!v2_context) - return NULL; + goto out_mutex_unlock; - mutex_lock(&global->lock); v2_context->id = find_first_zero_bit(global->v2.pta_alloc, ETNAVIV_PTA_ENTRIES); - if (v2_context->id < ETNAVIV_PTA_ENTRIES) { + if (v2_context->id < ETNAVIV_PTA_ENTRIES) set_bit(v2_context->id, global->v2.pta_alloc); - } else { - mutex_unlock(&global->lock); + else goto out_free; - } - mutex_unlock(&global->lock); v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K, &v2_context->mtlb_dma, GFP_KERNEL); @@ -304,11 +302,14 @@ etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global) INIT_LIST_HEAD(&context->mappings); drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K); + mutex_unlock(&global->lock); return context; out_free_id: clear_bit(v2_context->id, global->v2.pta_alloc); out_free: vfree(v2_context); +out_mutex_unlock: + mutex_unlock(&global->lock); return NULL; } -- 2.51.0