The function vduse_dev_alloc_coherent will be called under a rwlock in the next patches. Move the memory allocation out of the lock to avoid increasing its failure rate.
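To illustrate the intent, here is a rough sketch of the calling pattern this enables once the lock lands. The read_lock()/read_unlock() pair and the domain_lock name are placeholders only; the actual lock is introduced by later patches, not by this one. The rest of the fragment follows the new vduse_dev_alloc_coherent body from this patch:

	/*
	 * Hypothetical future caller: the page allocation, which may sleep
	 * (or would need GFP_ATOMIC and thus fail more often), is done
	 * before the lock is taken; only the IOVA/IOTLB setup runs under it.
	 */
	void *addr = alloc_pages_exact(size, flag);

	if (!addr)
		return NULL;

	read_lock(&vdev->domain_lock);	/* placeholder for the lock added later */
	*dma_addr = vduse_domain_alloc_coherent(vdev->domain, size, addr);
	read_unlock(&vdev->domain_lock);

	if (*dma_addr == DMA_MAPPING_ERROR) {
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;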
Signed-off-by: Eugenio Pérez <[email protected]>
---
v12:
* Avoid free_pages_exact(NULL, size) in case vduse_domain_alloc_coherent fails (MST).

v11:
* Remove duplicated call to free_pages_exact (Jason).
---
 drivers/vdpa/vdpa_user/iova_domain.c | 24 +++++++-----------------
 drivers/vdpa/vdpa_user/iova_domain.h |  5 ++---
 drivers/vdpa/vdpa_user/vduse_dev.c   | 17 +++++++++++------
 3 files changed, 20 insertions(+), 26 deletions(-)

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 309cd5a039d1..0a9f668467a8 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -493,17 +493,15 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 	vduse_domain_free_iova(iovad, dma_addr, size);
 }
 
-void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
-				  size_t size, dma_addr_t *dma_addr,
-				  gfp_t flag)
+dma_addr_t vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+				       size_t size, void *orig)
 {
 	struct iova_domain *iovad = &domain->consistent_iovad;
 	unsigned long limit = domain->iova_limit;
 	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
-	void *orig = alloc_pages_exact(size, flag);
 
-	if (!iova || !orig)
-		goto err;
+	if (!iova)
+		return DMA_MAPPING_ERROR;
 
 	spin_lock(&domain->iotlb_lock);
 	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
@@ -514,17 +512,12 @@ void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
 	}
 	spin_unlock(&domain->iotlb_lock);
 
-	*dma_addr = iova;
+	return iova;
 
-	return orig;
 err:
-	*dma_addr = DMA_MAPPING_ERROR;
-	if (orig)
-		free_pages_exact(orig, size);
-	if (iova)
-		vduse_domain_free_iova(iovad, iova, size);
+	vduse_domain_free_iova(iovad, iova, size);
 
-	return NULL;
+	return DMA_MAPPING_ERROR;
 }
 
 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
@@ -533,7 +526,6 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
 	struct iova_domain *iovad = &domain->consistent_iovad;
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
-	phys_addr_t pa;
 
 	spin_lock(&domain->iotlb_lock);
 	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
@@ -545,12 +537,10 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
 	map_file = (struct vdpa_map_file *)map->opaque;
 	fput(map_file->file);
 	kfree(map_file);
-	pa = map->addr;
 	vhost_iotlb_map_free(domain->iotlb, map);
 	spin_unlock(&domain->iotlb_lock);
 
 	vduse_domain_free_iova(iovad, dma_addr, size);
-	free_pages_exact(phys_to_virt(pa), size);
 }
 
 static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 081f06c52cdc..e50e55d1396f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -65,9 +65,8 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 			     dma_addr_t dma_addr, size_t size,
 			     enum dma_data_direction dir, unsigned long attrs);
 
-void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
-				  size_t size, dma_addr_t *dma_addr,
-				  gfp_t flag);
+dma_addr_t vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+				       size_t size, void *orig);
 
 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
 				dma_addr_t dma_addr, unsigned long attrs);
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0e3cf5128ad0..6dba1f3224d9 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -916,23 +916,27 @@ static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
 {
 	struct vduse_dev *vdev;
 	struct vduse_iova_domain *domain;
-	unsigned long iova;
 	void *addr;
 
 	*dma_addr = DMA_MAPPING_ERROR;
 	if (!token.group)
 		return NULL;
 
-	vdev = token.group->dev;
-	domain = vdev->domain;
-	addr = vduse_domain_alloc_coherent(domain, size,
-					   (dma_addr_t *)&iova, flag);
+	addr = alloc_pages_exact(size, flag);
 	if (!addr)
 		return NULL;
 
-	*dma_addr = (dma_addr_t)iova;
+	vdev = token.group->dev;
+	domain = vdev->domain;
+	*dma_addr = vduse_domain_alloc_coherent(domain, size, addr);
+	if (*dma_addr == DMA_MAPPING_ERROR)
+		goto err;
 
 	return addr;
+
+err:
+	free_pages_exact(addr, size);
+	return NULL;
 }
 
 static void vduse_dev_free_coherent(union virtio_map token, size_t size,
@@ -949,6 +953,7 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
 
 	domain = vdev->domain;
 	vduse_domain_free_coherent(domain, size, dma_addr, attrs);
+	free_pages_exact(vaddr, size);
 }
 
 static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
-- 
2.52.0

