* Kirti Wankhede <kwankh...@nvidia.com> [2016-11-05 02:40:43 +0530]:

Hi Kirti,

[...]

> static int vfio_dma_do_map(struct vfio_iommu *iommu,
> 			   struct vfio_iommu_type1_dma_map *map)
> {
> 	dma_addr_t iova = map->iova;
> 	unsigned long vaddr = map->vaddr;
> 	size_t size = map->size;
> -	long npage;
> 	int ret = 0, prot = 0;
> 	uint64_t mask;
> 	struct vfio_dma *dma;
> -	unsigned long pfn;
> +	struct vfio_addr_space *addr_space;
> +	struct mm_struct *mm;
> +	bool free_addr_space_on_err = false;
> 
> 	/* Verify that none of our __u64 fields overflow */
> 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
> @@ -608,47 +685,56 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
> 	mutex_lock(&iommu->lock);
> 
> 	if (vfio_find_dma(iommu, iova, size)) {
> -		mutex_unlock(&iommu->lock);
> -		return -EEXIST;
> +		ret = -EEXIST;
> +		goto do_map_err;
> +	}
> +
> +	mm = get_task_mm(current);
> +	if (!mm) {
> +		ret = -ENODEV;
> +		goto do_map_err;
> +	}
> +
> +	addr_space = vfio_find_addr_space(iommu, mm);
> +	if (addr_space) {
> +		atomic_inc(&addr_space->ref_count);
> +		mmput(mm);
> +	} else {
> +		addr_space = kzalloc(sizeof(*addr_space), GFP_KERNEL);
> +		if (!addr_space) {
> +			ret = -ENOMEM;

Don't we need to call mmput(mm) here? Otherwise the reference taken by
get_task_mm() above leaks on this error path.
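An untested sketch of what I have in mind, keeping the names and the
do_map_err label from your patch:

		addr_space = kzalloc(sizeof(*addr_space), GFP_KERNEL);
		if (!addr_space) {
			ret = -ENOMEM;
			/* drop the reference taken by get_task_mm() */
			mmput(mm);
			goto do_map_err;
		}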
> +			goto do_map_err;
> +		}
> +		addr_space->mm = mm;
> +		atomic_set(&addr_space->ref_count, 1);
> +		list_add(&addr_space->next, &iommu->addr_space_list);
> +		free_addr_space_on_err = true;
> 	}
> 
> 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
> 	if (!dma) {
> -		mutex_unlock(&iommu->lock);
> -		return -ENOMEM;
> +		if (free_addr_space_on_err) {
> +			mmput(mm);
> +			list_del(&addr_space->next);
> +			kfree(addr_space);
> +		}
> +		ret = -ENOMEM;
> +		goto do_map_err;
> 	}
> 
> 	dma->iova = iova;
> 	dma->vaddr = vaddr;
> 	dma->prot = prot;
> +	dma->addr_space = addr_space;
> +	get_task_struct(current);
> +	dma->task = current;
> +	dma->mlock_cap = capable(CAP_IPC_LOCK);
> 
> 	/* Insert zero-sized and grow as we map chunks of it */
> 	vfio_link_dma(iommu, dma);
> 
> -	while (size) {
> -		/* Pin a contiguous chunk of memory */
> -		npage = __vfio_pin_pages_remote(vaddr + dma->size,
> -						size >> PAGE_SHIFT, prot, &pfn);
> -		if (npage <= 0) {
> -			WARN_ON(!npage);
> -			ret = (int)npage;
> -			break;
> -		}
> -
> -		/* Map it! */
> -		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
> -		if (ret) {
> -			__vfio_unpin_pages_remote(pfn, npage, prot, true);
> -			break;
> -		}
> -
> -		size -= npage << PAGE_SHIFT;
> -		dma->size += npage << PAGE_SHIFT;
> -	}
> -
> -	if (ret)
> -		vfio_remove_dma(iommu, dma);
> -
> +	ret = vfio_pin_map_dma(iommu, dma, size);
> +do_map_err:
> 	mutex_unlock(&iommu->lock);
> 	return ret;
> }
> --
> 2.7.0

--
Dong Jia