Re: [PATCH 1/5] swiotlb: Introduce concept of swiotlb_pool

2020-04-28 Thread kbuild test robot
Hi Srivatsa,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on vhost/linux-next]
[also build test ERROR on xen-tip/linux-next linus/master v5.7-rc3 next-20200428]
[cannot apply to swiotlb/linux-next]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify
the base tree in git format-patch; please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Srivatsa-Vaddagiri/virtio-on-Type-1-hypervisor/20200429-032334
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
config: x86_64-defconfig (attached as .config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kbuild test robot 

All errors (new ones prefixed by >>):

   drivers/iommu/intel-iommu.c: In function 'bounce_map_single':
>> drivers/iommu/intel-iommu.c:3990:24: error: 'io_tlb_start' undeclared (first use in this function); did you mean 'swiotlb_start'?
      __phys_to_dma(dev, io_tlb_start),
                         ^~~~~~~~~~~~
                         swiotlb_start
   drivers/iommu/intel-iommu.c:3990:24: note: each undeclared identifier is reported only once for each function it appears in

vim +3990 drivers/iommu/intel-iommu.c

cfb94a372f2d4e Lu Baolu 2019-09-06  3941  
cfb94a372f2d4e Lu Baolu 2019-09-06  3942  static dma_addr_t
cfb94a372f2d4e Lu Baolu 2019-09-06  3943  bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
cfb94a372f2d4e Lu Baolu 2019-09-06  3944  		  enum dma_data_direction dir, unsigned long attrs,
cfb94a372f2d4e Lu Baolu 2019-09-06  3945  		  u64 dma_mask)
cfb94a372f2d4e Lu Baolu 2019-09-06  3946  {
cfb94a372f2d4e Lu Baolu 2019-09-06  3947  	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
cfb94a372f2d4e Lu Baolu 2019-09-06  3948  	struct dmar_domain *domain;
cfb94a372f2d4e Lu Baolu 2019-09-06  3949  	struct intel_iommu *iommu;
cfb94a372f2d4e Lu Baolu 2019-09-06  3950  	unsigned long iova_pfn;
cfb94a372f2d4e Lu Baolu 2019-09-06  3951  	unsigned long nrpages;
cfb94a372f2d4e Lu Baolu 2019-09-06  3952  	phys_addr_t tlb_addr;
cfb94a372f2d4e Lu Baolu 2019-09-06  3953  	int prot = 0;
cfb94a372f2d4e Lu Baolu 2019-09-06  3954  	int ret;
cfb94a372f2d4e Lu Baolu 2019-09-06  3955  
a11bfde9c77df1 Joerg Roedel 2020-02-17  3956  	if (unlikely(attach_deferred(dev)))
a11bfde9c77df1 Joerg Roedel 2020-02-17  3957  		do_deferred_attach(dev);
a11bfde9c77df1 Joerg Roedel 2020-02-17  3958  
96d170f3b1a607 Joerg Roedel 2020-02-17  3959  	domain = find_domain(dev);
a11bfde9c77df1 Joerg Roedel 2020-02-17  3960  
cfb94a372f2d4e Lu Baolu 2019-09-06  3961  	if (WARN_ON(dir == DMA_NONE || !domain))
cfb94a372f2d4e Lu Baolu 2019-09-06  3962  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06  3963  
cfb94a372f2d4e Lu Baolu 2019-09-06  3964  	iommu = domain_get_iommu(domain);
cfb94a372f2d4e Lu Baolu 2019-09-06  3965  	if (WARN_ON(!iommu))
cfb94a372f2d4e Lu Baolu 2019-09-06  3966  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06  3967  
cfb94a372f2d4e Lu Baolu 2019-09-06  3968  	nrpages = aligned_nrpages(0, size);
cfb94a372f2d4e Lu Baolu 2019-09-06  3969  	iova_pfn = intel_alloc_iova(dev, domain,
cfb94a372f2d4e Lu Baolu 2019-09-06  3970  				dma_to_mm_pfn(nrpages), dma_mask);
cfb94a372f2d4e Lu Baolu 2019-09-06  3971  	if (!iova_pfn)
cfb94a372f2d4e Lu Baolu 2019-09-06  3972  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06  3973  
cfb94a372f2d4e Lu Baolu 2019-09-06  3974  	/*
cfb94a372f2d4e Lu Baolu 2019-09-06  3975  	 * Check if DMAR supports zero-length reads on write only
cfb94a372f2d4e Lu Baolu 2019-09-06  3976  	 * mappings..
cfb94a372f2d4e Lu Baolu 2019-09-06  3977  	 */
cfb94a372f2d4e Lu Baolu 2019-09-06  3978  	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
cfb94a372f2d4e Lu Baolu 2019-09-06  3979  			!cap_zlr(iommu->cap))
cfb94a372f2d4e Lu Baolu 2019-09-06  3980  		prot |= DMA_PTE_READ;
cfb94a372f2d4e Lu Baolu 2019-09-06  3981  	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
cfb94a372f2d4e Lu Baolu 2019-09-06  3982  		prot |= DMA_PTE_WRITE;
cfb94a372f2d4e Lu Baolu 2019-09-06  3983  
cfb94a372f2d4e Lu Baolu 2019-09-06  3984  	/*
cfb94a372f2d4e Lu Baolu 2019-09-06  3985  	 * If both the physical buffer start address and size are
cfb94a372f2d4e Lu Baolu 2019-09-06  3986  	 * page aligned, we don't need to use a bounce page.
cfb94a372f2d4e Lu Baolu 2019-09-06  3987  	 */
cfb94a372f2d4e Lu Baolu
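
The quoted listing is cut off before line 3990 itself, but the diagnostic
above shows the offending expression, and the compiler already suggests the
replacement. Mirroring the conversion this patch applies to xen-swiotlb
(quoted below), the fix in bounce_map_single() would presumably be the
following two-line change -- an untested sketch, assuming swiotlb_start()
returns the start address of the default pool:

-				__phys_to_dma(dev, io_tlb_start),
+				__phys_to_dma(dev, swiotlb_start()),

i.e. every user of the removed io_tlb_start global needs to switch to the
new accessor, and intel-iommu.c was missed in this posting.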

[PATCH 1/5] swiotlb: Introduce concept of swiotlb_pool

2020-04-28 Thread Srivatsa Vaddagiri
Currently the swiotlb driver manages a global pool of memory which
acts as bounce buffers for memory that is not accessible to some
devices. The core functions provided by this driver to
allocate/free/bounce memory chunks will be more
useful if the driver can manage more than one pool. An immediate
application of such an extension to the swiotlb driver is to bounce
virtio buffers between the private and shared space of a VM.

This patch introduces the concept of a swiotlb memory pool and
reorganizes the current driver to work with a default global pool.
There is no functional change introduced by this patch.
Subsequent patches allow the swiotlb driver to work with more
than one pool of memory.
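
To illustrate the idea (this sketch is not part of the patch): the old
globals such as io_tlb_start become fields of a pool object, and small
accessors hide the default pool from existing callers. Assuming the
swiotlb_start() helper referenced in the xen-swiotlb hunk below is such
an accessor, it would reduce to roughly:

	struct swiotlb_pool default_swiotlb_pool;

	static inline phys_addr_t swiotlb_start(void)
	{
		return default_swiotlb_pool.io_tlb_start;
	}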

Signed-off-by: Srivatsa Vaddagiri 
---
 drivers/xen/swiotlb-xen.c |   4 +-
 include/linux/swiotlb.h   | 129 -
 kernel/dma/swiotlb.c  | 359 +++---
 3 files changed, 307 insertions(+), 185 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b6d2776..c2dc9c8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -190,8 +190,8 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 	/*
 	 * IO TLB memory already allocated. Just use it.
 	 */
-	if (io_tlb_start != 0) {
-		xen_io_tlb_start = phys_to_virt(io_tlb_start);
+	if (swiotlb_start()) {
+		xen_io_tlb_start = phys_to_virt(swiotlb_start());
 		goto end;
 	}
 
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 046bb94..8c7843f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -44,7 +44,59 @@ enum dma_sync_target {
SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+#define MAX_POOL_NAME_SIZE 16
+
+struct swiotlb_pool {
+   char name[MAX_POOL_NAME_SIZE];
+   bool no_iotlb_memory;
+   int late_alloc;
+
+   spinlock_t io_tlb_lock;
+
+   /*
+    * Used to do a quick range check in swiotlb_tbl_unmap_single and
+    * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
+    * by this API.
+    */
+
+   phys_addr_t io_tlb_start, io_tlb_end;
+
+   /*
+    * The number of IO TLB blocks (in groups of 64) between io_tlb_start
+    * and io_tlb_end.  This is command line adjustable via
+    * setup_io_tlb_npages.
+    */
+   unsigned long io_tlb_nslabs;
+
+   /*
+    * The number of used IO TLB blocks.
+    */
+   unsigned long io_tlb_used;
+
+   /*
+    * This is a free list describing the number of free entries available
+    * from each index
+    */
+   unsigned int *io_tlb_list;
+   unsigned int io_tlb_index;
+
+   /*
+    * We need to save away the original address corresponding to a mapped
+    * entry for the sync operations.
+    */
+   phys_addr_t *io_tlb_orig_addr;
+
+   /*
+    * Max segment that we can provide which (if pages are contiguous) will
+    * not be bounced (unless SWIOTLB_FORCE is set).
+    */
+   unsigned int max_segment;
+};
+
+extern struct swiotlb_pool default_swiotlb_pool;
+
+extern phys_addr_t _swiotlb_tbl_map_single(struct swiotlb_pool *pool,
+ struct device *hwdev,
  dma_addr_t tbl_dma_addr,
  phys_addr_t phys,
  size_t mapping_size,
@@ -52,28 +104,80 @@ extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
  enum dma_data_direction dir,
  unsigned long attrs);
 
-extern void swiotlb_tbl_unmap_single(struct device *hwdev,
+extern void _swiotlb_tbl_unmap_single(struct swiotlb_pool *pool,
+struct device *hwdev,
 phys_addr_t tlb_addr,
 size_t mapping_size,
 size_t alloc_size,
 enum dma_data_direction dir,
 unsigned long attrs);
 
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
+extern void _swiotlb_tbl_sync_single(struct swiotlb_pool *pool,
+   struct device *hwdev,
phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
 
-dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
-   size_t size, enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t _swiotlb_map(struct swiotlb_pool *pool, struct device *dev,
+   phys_addr_t phys, size_t size, enum dma_data_direction dir,
+   unsigned long attrs);
+
+static inline phys_addr_t
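
The quoted diff is truncated at this point. Judging from the pool-aware
declarations above, the inline helper that begins here presumably forwards
the old swiotlb_tbl_map_single() interface to the new
_swiotlb_tbl_map_single() using the default pool, roughly like the sketch
below (the exact parameter list, including alloc_size, is assumed from the
partially quoted declaration):

	static inline phys_addr_t
	swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
			       phys_addr_t phys, size_t mapping_size,
			       size_t alloc_size, enum dma_data_direction dir,
			       unsigned long attrs)
	{
		return _swiotlb_tbl_map_single(&default_swiotlb_pool, hwdev,
					       tbl_dma_addr, phys, mapping_size,
					       alloc_size, dir, attrs);
	}

This pattern keeps every existing caller working unchanged while new users
can pass an explicit pool.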