[PATCH 14/14] swiotlb: remove swiotlb_nr_tbl

2021-02-28 Thread Christoph Hellwig
All callers just use it to check if swiotlb is active at all, for which
they can just use is_swiotlb_active.  In the longer run drivers need
to stop using is_swiotlb_active as well, but let's do the simple step
first.
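
Since patch 13 makes swiotlb_nr_tbl() return
io_tlb_default_mem ? io_tlb_default_mem->nslabs : 0, callers that only
tested it for non-zero were really asking "is swiotlb initialized?".
A minimal sketch of the call-site pattern (the helper name is
hypothetical):

/* Hypothetical driver helper gating a swiotlb-specific limit. */
static bool driver_needs_segment_limit(void)
{
	/* was: return swiotlb_nr_tbl() != 0; */
	return is_swiotlb_active();	/* io_tlb_default_mem != NULL */
}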

Signed-off-by: Christoph Hellwig 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 +-
 drivers/gpu/drm/nouveau/nouveau_ttm.c        | 2 +-
 drivers/pci/xen-pcifront.c                   | 2 +-
 include/linux/swiotlb.h                      | 1 -
 kernel/dma/swiotlb.c                         | 7 +--
 5 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ad22f42541bda6..a9d65fc8aa0eab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
-   if (swiotlb_nr_tbl()) {
+   if (is_swiotlb_active()) {
unsigned int max_segment;
 
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index a37bc3d7b38b3b..9662522aa0664a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-   need_swiotlb = !!swiotlb_nr_tbl();
+   need_swiotlb = is_swiotlb_active();
 #endif
 
	ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c6fe0cfec0f681..a549e822033fd6 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
 
	spin_unlock(&pcifront_dev_lock);
 
-   if (!err && !swiotlb_nr_tbl()) {
+   if (!err && !is_swiotlb_active()) {
err = pci_xen_swiotlb_init_late();
if (err)
			dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 63f7a63f61d098..216854a5e5134b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -37,7 +37,6 @@ enum swiotlb_force {
 
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swiotlb_nr_tbl(void);
 unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b7bcd7b804bfe8..809d5fdc204675 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -94,12 +94,6 @@ setup_io_tlb_npages(char *str)
 }
 early_param("swiotlb", setup_io_tlb_npages);
 
-unsigned long swiotlb_nr_tbl(void)
-{
-   return io_tlb_default_mem ? io_tlb_default_mem->nslabs : 0;
-}
-EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-
 unsigned int swiotlb_max_segment(void)
 {
return io_tlb_default_mem ? max_segment : 0;
@@ -652,6 +646,7 @@ bool is_swiotlb_active(void)
 {
return io_tlb_default_mem != NULL;
 }
+EXPORT_SYMBOL_GPL(is_swiotlb_active);
 
 #ifdef CONFIG_DEBUG_FS
 
-- 
2.29.2



[PATCH 13/14] swiotlb: dynamically allocate io_tlb_default_mem

2021-02-28 Thread Christoph Hellwig
Instead of allocating ->list and ->orig_addr separately just do one
dynamic allocation for the actual io_tlb_mem structure.  This simplifies
a lot of the initialization code, and also allows just checking
io_tlb_default_mem to see if swiotlb is in use.
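
The structural change underneath (from the swiotlb.h hunk below) is a
flexible array member at the end of struct io_tlb_mem, so the
descriptor and its per-slot metadata come from a single allocation.  A
standalone sketch of the pattern, with _ex names that are illustrative
rather than the kernel's:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct io_tlb_slot_ex {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

struct io_tlb_mem_ex {
	unsigned long nslabs;
	struct io_tlb_slot_ex slots[];	/* flexible array member */
};

static struct io_tlb_mem_ex *io_tlb_mem_alloc_ex(unsigned long nslabs)
{
	struct io_tlb_mem_ex *mem;

	/* one allocation covers the header plus nslabs slot records */
	mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
	if (mem)
		mem->nslabs = nslabs;
	return mem;
}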

Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c |  22 +--
 include/linux/swiotlb.h   |  18 ++-
 kernel/dma/swiotlb.c  | 300 +-
 3 files changed, 117 insertions(+), 223 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 5329ad54a5f34e..4c89afc0df6289 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -158,17 +158,14 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 int __ref xen_swiotlb_init(void)
 {
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-   unsigned long nslabs, bytes, order;
-   unsigned int repeat = 3;
+   unsigned long bytes = swiotlb_size_or_default();
+   unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+   unsigned int order, repeat = 3;
int rc = -ENOMEM;
char *start;
 
-   nslabs = swiotlb_nr_tbl();
-   if (!nslabs)
-   nslabs = DEFAULT_NSLABS;
 retry:
m_ret = XEN_SWIOTLB_ENOMEM;
-   bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
 
/*
@@ -221,19 +218,16 @@ int __ref xen_swiotlb_init(void)
 #ifdef CONFIG_X86
 void __init xen_swiotlb_init_early(void)
 {
-   unsigned long nslabs, bytes;
+   unsigned long bytes = swiotlb_size_or_default();
+   unsigned long nslabs = bytes >> IO_TLB_SHIFT;
unsigned int repeat = 3;
char *start;
int rc;
 
-   nslabs = swiotlb_nr_tbl();
-   if (!nslabs)
-   nslabs = DEFAULT_NSLABS;
 retry:
/*
 * Get IO TLB memory from any location.
 */
-   bytes = nslabs << IO_TLB_SHIFT;
start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -248,8 +242,8 @@ void __init xen_swiotlb_init_early(void)
if (repeat--) {
/* Min is 2MB */
nslabs = max(1024UL, (nslabs >> 1));
-   pr_info("Lowering to %luMB\n",
-   (nslabs << IO_TLB_SHIFT) >> 20);
+   bytes = nslabs << IO_TLB_SHIFT;
+   pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
@@ -548,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-   return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
+   return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5ec5378b17c333..63f7a63f61d098 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -90,28 +90,30 @@ struct io_tlb_mem {
phys_addr_t end;
unsigned long nslabs;
unsigned long used;
-   unsigned int *list;
unsigned int index;
-   phys_addr_t *orig_addr;
-   size_t *alloc_size;
spinlock_t lock;
struct dentry *debugfs;
bool late_alloc;
+   struct io_tlb_slot {
+   phys_addr_t orig_addr;
+   size_t alloc_size;
+   unsigned int list;
+   } slots[];
 };
-extern struct io_tlb_mem io_tlb_default_mem;
+extern struct io_tlb_mem *io_tlb_default_mem;
 
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
+   struct io_tlb_mem *mem = io_tlb_default_mem;
 
-   return paddr >= mem->start && paddr < mem->end;
+   return mem && paddr >= mem->start && paddr < mem->end;
 }
 
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_active(void);
-void __init swiotlb_adjust_size(unsigned long new_size);
+void __init swiotlb_adjust_size(unsigned long size);
 #else
 #define swiotlb_force SWIOTLB_NO_FORCE
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -135,7 +137,7 @@ static inline bool is_swiotlb_active(void)
return false;
 }
 
-static inline void swiotlb_adjust_size(unsigned long new_size)
+static inline void swiotlb_adjust_size(unsigned long size)
 {
 }
 #endif /* CONFIG_SWIOTLB */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 6aa84fa3b1467e..b7bcd7b804bfe8 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -63,7 +63,7 @@
 
 enum swiotlb_force swiotlb_force;
 
-struct io_tlb_mem io_tlb_default_mem;
+struct io_tlb_mem *io_tlb_default_mem;
 
 /*
  * Max segment that we can 

[PATCH 12/14] swiotlb: move global variables into a new io_tlb_mem structure

2021-02-28 Thread Christoph Hellwig
From: Claire Chang 

Added a new struct, io_tlb_mem, as the IO TLB memory pool descriptor and
moved relevant global variables into that struct.
This will be useful later to allow for restricted DMA pool.
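
The practical payoff of the descriptor is that helpers which used to
read globals can eventually take a pool argument instead; an
illustrative (not part of this patch) shape:

/* Illustrative only: the same check can then serve any pool instance. */
static bool io_tlb_mem_contains(struct io_tlb_mem *mem, phys_addr_t paddr)
{
	return mem && paddr >= mem->start && paddr < mem->end;
}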

Signed-off-by: Claire Chang 
[hch: rebased]
Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c |   2 +-
 include/linux/swiotlb.h   |  43 -
 kernel/dma/swiotlb.c  | 354 +-
 3 files changed, 203 insertions(+), 196 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4ecfce2c6f7263..5329ad54a5f34e 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -548,7 +548,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-   return xen_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+   return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 0696bdc8072e97..5ec5378b17c333 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/limits.h>
+#include <linux/spinlock.h>
 
 struct device;
 struct page;
@@ -61,11 +62,49 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
 
 #ifdef CONFIG_SWIOTLB
 extern enum swiotlb_force swiotlb_force;
-extern phys_addr_t io_tlb_start, io_tlb_end;
+
+/**
+ * struct io_tlb_mem - IO TLB Memory Pool Descriptor
+ *
+ * @start: The start address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @end:   The end address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
+ *		@end. This is command line adjustable via setup_io_tlb_npages.
+ * @used:  The number of used IO TLB block.
+ * @list:  The free list describing the number of free entries available
+ * from each index.
+ * @index: The index to start searching in the next round.
+ * @orig_addr: The original address corresponding to a mapped entry.
+ * @alloc_size:	Size of the allocated buffer.
+ * @lock:  The lock to protect the above data structures in the map and
+ * unmap calls.
+ * @debugfs:   The dentry to debugfs.
+ * @late_alloc:	%true if allocated using the page allocator
+ */
+struct io_tlb_mem {
+   phys_addr_t start;
+   phys_addr_t end;
+   unsigned long nslabs;
+   unsigned long used;
+   unsigned int *list;
+   unsigned int index;
+   phys_addr_t *orig_addr;
+   size_t *alloc_size;
+   spinlock_t lock;
+   struct dentry *debugfs;
+   bool late_alloc;
+};
+extern struct io_tlb_mem io_tlb_default_mem;
 
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
-   return paddr >= io_tlb_start && paddr < io_tlb_end;
+	struct io_tlb_mem *mem = &io_tlb_default_mem;
+
+   return paddr >= mem->start && paddr < mem->end;
 }
 
 void __init swiotlb_exit(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index ebe7c123e27e52..6aa84fa3b1467e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -59,32 +59,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-enum swiotlb_force swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by 
this
- * API.
- */
-phys_addr_t io_tlb_start, io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 
-/*
- * The number of used IO TLB block
- */
-static unsigned long io_tlb_used;
+enum swiotlb_force swiotlb_force;
 
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
+struct io_tlb_mem io_tlb_default_mem;
 
 /*
  * Max segment that we can provide which (if pages are contingous) will
@@ -92,32 +71,15 @@ static unsigned int io_tlb_index;
  */
 static unsigned int max_segment;
 
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-static phys_addr_t *io_tlb_orig_addr;
-
-/*
- * The mapped buffer's size should be validated during a sync operation.
- */
-static size_t *io_tlb_alloc_size;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int late_alloc;
-
 static int __init
 

[PATCH 11/14] xen-swiotlb: remove the unused size argument from xen_swiotlb_fixup

2021-02-28 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 00adeb95ebb9df..4ecfce2c6f7263 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -104,8 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
return 0;
 }
 
-static int
-xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
int i, rc;
int dma_bits;
@@ -195,7 +194,7 @@ int __ref xen_swiotlb_init(void)
/*
 * And replace that memory with pages under 4GB.
 */
-   rc = xen_swiotlb_fixup(start, bytes, nslabs);
+   rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
free_pages((unsigned long)start, order);
m_ret = XEN_SWIOTLB_EFIXUP;
@@ -243,7 +242,7 @@ void __init xen_swiotlb_init_early(void)
/*
 * And replace that memory with pages under 4GB.
 */
-   rc = xen_swiotlb_fixup(start, bytes, nslabs);
+   rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
memblock_free(__pa(start), PAGE_ALIGN(bytes));
if (repeat--) {
-- 
2.29.2



[PATCH 08/14] xen-swiotlb: remove xen_io_tlb_start and xen_io_tlb_nslabs

2021-02-28 Thread Christoph Hellwig
The xen_io_tlb_start and xen_io_tlb_nslabs variables are now only used in
xen_swiotlb_init, so replace them with local variables.

Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c | 57 +--
 1 file changed, 25 insertions(+), 32 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -40,14 +40,7 @@
 
 #include 
 #define MAX_DMA_BITS 32
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by 
this
- * API.
- */
 
-static char *xen_io_tlb_start;
-static unsigned long xen_io_tlb_nslabs;
 /*
  * Quick lookup value of the bus address of the IOTLB.
  */
@@ -169,75 +162,75 @@ int __ref xen_swiotlb_init(int verbose, bool early)
int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned int repeat = 3;
+   char *start;
+   unsigned long nslabs;
 
-   xen_io_tlb_nslabs = swiotlb_nr_tbl();
+   nslabs = swiotlb_nr_tbl();
 retry:
-   if (!xen_io_tlb_nslabs)
-   xen_io_tlb_nslabs = DEFAULT_NSLABS;
-   bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+   if (!nslabs)
+   nslabs = DEFAULT_NSLABS;
+   bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
 
/*
 * IO TLB memory already allocated. Just use it.
 */
-   if (io_tlb_start != 0) {
-   xen_io_tlb_start = phys_to_virt(io_tlb_start);
+   if (io_tlb_start != 0)
goto end;
-   }
 
/*
 * Get IO TLB memory from any location.
 */
if (early) {
-   xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
+   start = memblock_alloc(PAGE_ALIGN(bytes),
  PAGE_SIZE);
-   if (!xen_io_tlb_start)
+   if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
  __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
} else {
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
-   if (xen_io_tlb_start)
+   start = (void *)xen_get_swiotlb_free_pages(order);
+   if (start)
break;
order--;
}
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for 
software IO TLB\n",
(PAGE_SIZE << order) >> 20);
-   xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
-   bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+   nslabs = SLABS_PER_PAGE << order;
+   bytes = nslabs << IO_TLB_SHIFT;
}
}
-   if (!xen_io_tlb_start) {
+   if (!start) {
m_ret = XEN_SWIOTLB_ENOMEM;
goto error;
}
/*
 * And replace that memory with pages under 4GB.
 */
-   rc = xen_swiotlb_fixup(xen_io_tlb_start,
+   rc = xen_swiotlb_fixup(start,
   bytes,
-  xen_io_tlb_nslabs);
+  nslabs);
if (rc) {
if (early)
-   memblock_free(__pa(xen_io_tlb_start),
+   memblock_free(__pa(start),
  PAGE_ALIGN(bytes));
else {
-   free_pages((unsigned long)xen_io_tlb_start, order);
-   xen_io_tlb_start = NULL;
+   free_pages((unsigned long)start, order);
+   start = NULL;
}
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
if (early) {
-   if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
+   if (swiotlb_init_with_tbl(start, nslabs,
 verbose))
panic("Cannot allocate SWIOTLB buffer");
rc = 0;
} else
-		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+   rc = swiotlb_late_init_with_tbl(start, nslabs);
 
 end:
if (!rc)
@@ -246,17 +239,17 @@ int __ref xen_swiotlb_init(int verbose, bool early)
return rc;
 error:
if (repeat--) {
-   xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
-   (xen_io_tlb_nslabs >> 1));
+		nslabs = max(1024UL, /* Min is 2MB */
+			     (nslabs >> 1));

[PATCH 10/14] xen-swiotlb: split xen_swiotlb_init

2021-02-28 Thread Christoph Hellwig
Split xen_swiotlb_init into a normal and an early case.  That makes both
much simpler and more readable, and also allows marking the early
code as __init and x86-only.
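
The include/xen/swiotlb-xen.h side of the split is not shown below; a
sketch of the resulting prototypes, matching the callers in this diff:

int xen_swiotlb_init(void);		/* late init, may fail with -errno */
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void);	/* boot-time, panics on failure */
#endif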

Signed-off-by: Christoph Hellwig 
---
 arch/arm/xen/mm.c  |   2 +-
 arch/x86/xen/pci-swiotlb-xen.c |   4 +-
 drivers/xen/swiotlb-xen.c  | 124 +++--
 include/xen/swiotlb-xen.h  |   3 +-
 4 files changed, 75 insertions(+), 58 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 467fa225c3d0ed..aae950cd053fea 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -140,7 +140,7 @@ static int __init xen_mm_init(void)
struct gnttab_cache_flush cflush;
if (!xen_initial_domain())
return 0;
-   xen_swiotlb_init(1, false);
+   xen_swiotlb_init();
 
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 19ae3e4fe4e98e..54f9aa7e845739 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void)
 void __init pci_xen_swiotlb_init(void)
 {
if (xen_swiotlb) {
-   xen_swiotlb_init(1, true /* early */);
+   xen_swiotlb_init_early();
		dma_ops = &xen_swiotlb_dma_ops;
 
 #ifdef CONFIG_PCI
@@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void)
if (xen_swiotlb)
return 0;
 
-   rc = xen_swiotlb_init(1, false /* late */);
+   rc = xen_swiotlb_init();
if (rc)
return rc;
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 060eeb056486f5..00adeb95ebb9df 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -156,96 +156,112 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 
 #define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
 
-int __ref xen_swiotlb_init(int verbose, bool early)
+int __ref xen_swiotlb_init(void)
 {
-   unsigned long bytes, order;
-   int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
+   unsigned long nslabs, bytes, order;
unsigned int repeat = 3;
+   int rc = -ENOMEM;
char *start;
-   unsigned long nslabs;
 
nslabs = swiotlb_nr_tbl();
-retry:
if (!nslabs)
nslabs = DEFAULT_NSLABS;
+retry:
+   m_ret = XEN_SWIOTLB_ENOMEM;
bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
 
/*
 * Get IO TLB memory from any location.
 */
-   if (early) {
-   start = memblock_alloc(PAGE_ALIGN(bytes),
- PAGE_SIZE);
-   if (!start)
-   panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
-   } else {
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-   while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-   start = (void *)xen_get_swiotlb_free_pages(order);
-   if (start)
-   break;
-   order--;
-   }
-   if (order != get_order(bytes)) {
-   pr_warn("Warning: only able to allocate %ld MB for 
software IO TLB\n",
-   (PAGE_SIZE << order) >> 20);
-   nslabs = SLABS_PER_PAGE << order;
-   bytes = nslabs << IO_TLB_SHIFT;
-   }
+   while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+   start = (void *)xen_get_swiotlb_free_pages(order);
+   if (start)
+   break;
+   order--;
}
-   if (!start) {
-   m_ret = XEN_SWIOTLB_ENOMEM;
+   if (!start)
goto error;
+   if (order != get_order(bytes)) {
+   pr_warn("Warning: only able to allocate %ld MB for software IO 
TLB\n",
+   (PAGE_SIZE << order) >> 20);
+   nslabs = SLABS_PER_PAGE << order;
+   bytes = nslabs << IO_TLB_SHIFT;
}
+
/*
 * And replace that memory with pages under 4GB.
 */
-   rc = xen_swiotlb_fixup(start,
-  bytes,
-  nslabs);
+   rc = xen_swiotlb_fixup(start, bytes, nslabs);
if (rc) {
-   if (early)
-   memblock_free(__pa(start),
- PAGE_ALIGN(bytes));
-   else {
-   free_pages((unsigned long)start, order);
-   start = NULL;
-   }
+   free_pages((unsigned long)start, order);
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
-   if 

[PATCH 09/14] swiotlb: lift the double initialization protection from xen-swiotlb

2021-02-28 Thread Christoph Hellwig
Lift the double initialization protection from xen-swiotlb to the core
code to avoid exposing too many swiotlb internals.  Also upgrade the
check to a warning as it should not happen.

Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c | 7 ---
 kernel/dma/swiotlb.c  | 8 
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1a31ddf7139799..060eeb056486f5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -172,12 +172,6 @@ int __ref xen_swiotlb_init(int verbose, bool early)
bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
 
-   /*
-* IO TLB memory already allocated. Just use it.
-*/
-   if (io_tlb_start != 0)
-   goto end;
-
/*
 * Get IO TLB memory from any location.
 */
@@ -232,7 +226,6 @@ int __ref xen_swiotlb_init(int verbose, bool early)
} else
rc = swiotlb_late_init_with_tbl(start, nslabs);
 
-end:
if (!rc)
swiotlb_set_max_segment(PAGE_SIZE);
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 388d9be35b5795..ebe7c123e27e52 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -229,6 +229,10 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
unsigned long i, bytes;
size_t alloc_size;
 
+   /* protect against double initialization */
+   if (WARN_ON_ONCE(io_tlb_start))
+   return -ENOMEM;
+
bytes = nslabs << IO_TLB_SHIFT;
 
io_tlb_nslabs = nslabs;
@@ -367,6 +371,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
unsigned long i, bytes;
 
+   /* protect against double initialization */
+   if (WARN_ON_ONCE(io_tlb_start))
+   return -ENOMEM;
+
bytes = nslabs << IO_TLB_SHIFT;
 
io_tlb_nslabs = nslabs;
-- 
2.29.2



[PATCH 07/14] xen-swiotlb: remove xen_set_nslabs

2021-02-28 Thread Christoph Hellwig
The xen_set_nslabs function is a little weird, as it has just one
caller, that caller passes a global variable as the argument,
which is then overridden in the function and a derivative of it
returned.  Just add a cpp symbol for the default size using a readable
constant and open code the remaining three lines in the caller.
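
As a worked check of the new constant (IO_TLB_SHIFT is 11, i.e. 2 KiB
slabs, and IO_TLB_SEGSIZE is 128):

/*
 * DEFAULT_NSLABS = ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
 *                = ALIGN(0x4000000 >> 11, 128)
 *                = ALIGN(32768, 128) = 32768 slabs
 * 32768 << 11 = 64 MiB, matching the open-coded
 * 64 * 1024 * 1024 >> IO_TLB_SHIFT that is being removed.
 */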

Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c | 19 +++
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e99f0614dcb979..5352655432e724 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -138,16 +138,6 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
} while (i < nslabs);
return 0;
 }
-static unsigned long xen_set_nslabs(unsigned long nr_tbl)
-{
-   if (!nr_tbl) {
-   xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-   xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-   } else
-   xen_io_tlb_nslabs = nr_tbl;
-
-   return xen_io_tlb_nslabs << IO_TLB_SHIFT;
-}
 
 enum xen_swiotlb_err {
XEN_SWIOTLB_UNKNOWN = 0,
@@ -170,6 +160,9 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
}
return "";
 }
+
+#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
+
 int __ref xen_swiotlb_init(int verbose, bool early)
 {
unsigned long bytes, order;
@@ -179,8 +172,10 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 
xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
-   bytes = xen_set_nslabs(xen_io_tlb_nslabs);
-   order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
+   if (!xen_io_tlb_nslabs)
+   xen_io_tlb_nslabs = DEFAULT_NSLABS;
+   bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+   order = get_order(bytes);
 
/*
 * IO TLB memory already allocated. Just use it.
-- 
2.29.2



[PATCH 06/14] xen-swiotlb: use io_tlb_end in xen_swiotlb_dma_supported

2021-02-28 Thread Christoph Hellwig
Use the existing variable that holds the physical address for
xen_io_tlb_end to simplify xen_swiotlb_dma_supported a bit, and remove
the otherwise unused xen_io_tlb_end variable and the xen_virt_to_bus
helper.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Konrad Rzeszutek Wilk 
---
 drivers/xen/swiotlb-xen.c | 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bffb35993c9d5f..e99f0614dcb979 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -46,7 +46,7 @@
  * API.
  */
 
-static char *xen_io_tlb_start, *xen_io_tlb_end;
+static char *xen_io_tlb_start;
 static unsigned long xen_io_tlb_nslabs;
 /*
  * Quick lookup value of the bus address of the IOTLB.
@@ -82,11 +82,6 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
 }
 
-static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
-{
-   return xen_phys_to_dma(dev, virt_to_phys(address));
-}
-
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
@@ -250,7 +245,6 @@ int __ref xen_swiotlb_init(int verbose, bool early)
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
 
 end:
-   xen_io_tlb_end = xen_io_tlb_start + bytes;
if (!rc)
swiotlb_set_max_segment(PAGE_SIZE);
 
@@ -558,7 +552,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-   return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
+   return xen_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
-- 
2.29.2



[PATCH 05/14] xen-swiotlb: use is_swiotlb_buffer in is_xen_swiotlb_buffer

2021-02-28 Thread Christoph Hellwig
Use is_swiotlb_buffer to check whether a physical address is
a swiotlb buffer.  This works because xen-swiotlb uses the
same buffer as the main swiotlb code, and xen_io_tlb_{start,end}
are just the addresses for it that went through phys_to_virt.
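
For reference, is_swiotlb_buffer() at this point in the series is just
a range check against the global pool (the pre-struct definition, also
visible as removed lines in the patch 12 diff above):

static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}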

Signed-off-by: Christoph Hellwig 
Reviewed-by: Konrad Rzeszutek Wilk 
---
 drivers/xen/swiotlb-xen.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4e8a4e14942afd..bffb35993c9d5f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -111,10 +111,8 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 * have the same virtual address as another address
 * in our domain. Therefore _only_ check address within our domain.
 */
-   if (pfn_valid(PFN_DOWN(paddr))) {
-   return paddr >= virt_to_phys(xen_io_tlb_start) &&
-  paddr < virt_to_phys(xen_io_tlb_end);
-   }
+   if (pfn_valid(PFN_DOWN(paddr)))
+   return is_swiotlb_buffer(paddr);
return 0;
 }
 
-- 
2.29.2



[PATCH 04/14] swiotlb: split swiotlb_tbl_sync_single

2021-02-28 Thread Christoph Hellwig
Split swiotlb_tbl_sync_single into two separate functions for the
to-device and to-CPU synchronization.
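
The kernel/dma/swiotlb.c half of this patch is truncated below; a
sketch of the two resulting functions, assuming the swiotlb_bounce()
signature introduced in patch 03 of this series:

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}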

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/dma-iommu.c | 12 ++--
 drivers/xen/swiotlb-xen.c |  4 ++--
 include/linux/swiotlb.h   | 17 -
 kernel/dma/direct.c   |  8 
 kernel/dma/direct.h   |  4 ++--
 kernel/dma/swiotlb.c  | 34 +++---
 6 files changed, 33 insertions(+), 46 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index da2bd8f0885e6e..b57a0e3e21f6c7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -749,7 +749,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(phys, size, dir);
 
if (is_swiotlb_buffer(phys))
-   swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+   swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -762,7 +762,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(phys))
-   swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+   swiotlb_sync_single_for_device(dev, phys, size, dir);
 
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(phys, size, dir);
@@ -783,8 +783,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 
if (is_swiotlb_buffer(sg_phys(sg)))
-   swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-   dir, SYNC_FOR_CPU);
+   swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+   sg->length, dir);
}
 }
 
@@ -800,8 +800,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 
for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg)))
-   swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-   dir, SYNC_FOR_DEVICE);
+   swiotlb_sync_single_for_device(dev, sg_phys(sg),
+  sg->length, dir);
 
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index d47f1b311caac0..4e8a4e14942afd 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -462,7 +462,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
}
 
if (is_xen_swiotlb_buffer(dev, dma_addr))
-   swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+   swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 }
 
 static void
@@ -472,7 +472,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
if (is_xen_swiotlb_buffer(dev, dma_addr))
-   swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+   swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
if (!dev_is_dma_coherent(dev)) {
if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 59f421d041ed9e..0696bdc8072e97 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -42,14 +42,6 @@ extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 extern void __init swiotlb_update_mem_attributes(void);
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-   SYNC_FOR_CPU = 0,
-   SYNC_FOR_DEVICE = 1,
-};
-
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
size_t mapping_size, size_t alloc_size,
enum dma_data_direction dir, unsigned long attrs);
@@ -60,11 +52,10 @@ extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 enum dma_data_direction dir,
 unsigned long attrs);
 
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
-   phys_addr_t tlb_addr,
-   size_t size, enum dma_data_direction dir,
-   enum dma_sync_target target);
-
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+   size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir);

[PATCH 03/14] swiotlb: move orig addr and size validation into swiotlb_bounce

2021-02-28 Thread Christoph Hellwig
Move the code to find and validate the original buffer address and size
from the callers into swiotlb_bounce.  This means a tiny bit of extra
work in the swiotlb_map path, but avoids code duplication and leads to
a better code structure.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/swiotlb.c | 59 +---
 1 file changed, 23 insertions(+), 36 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 03aa614565e417..a9063092f6f566 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -460,12 +460,25 @@ void __init swiotlb_exit(void)
 /*
  * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
-  size_t size, enum dma_data_direction dir)
+static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
+   enum dma_data_direction dir)
 {
+   int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+   size_t alloc_size = io_tlb_alloc_size[index];
+   phys_addr_t orig_addr = io_tlb_orig_addr[index];
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
 
+   if (orig_addr == INVALID_PHYS_ADDR)
+   return;
+
+   if (size > alloc_size) {
+   dev_WARN_ONCE(dev, 1,
+   "Buffer overflow detected. Allocation size: %zu. 
Mapping size: %zu.\n",
+   alloc_size, size);
+   size = alloc_size;
+   }
+
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping.  Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK;
@@ -644,21 +657,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
tlb_addr = slot_addr(io_tlb_start, index) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+   swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
 }
 
-static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size)
-{
-   if (*size > alloc_size) {
-   /* Warn and truncate mapping_size */
-   dev_WARN_ONCE(hwdev, 1,
-   "Attempt for buffer overflow. Original size: %zu. 
Mapping size: %zu.\n",
-   alloc_size, *size);
-   *size = alloc_size;
-   }
-}
-
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
@@ -669,19 +671,15 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
unsigned long flags;
unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
-   phys_addr_t orig_addr = io_tlb_orig_addr[index];
-   size_t alloc_size = io_tlb_alloc_size[index];
-   int i, count, nslots = nr_slots(alloc_size + offset);
-
-	validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size);
+   int nslots = nr_slots(io_tlb_alloc_size[index] + offset);
+   int count, i;
 
/*
 * First, sync the memory before unmapping the entry
 */
-   if (orig_addr != INVALID_PHYS_ADDR &&
-   !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-   ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+   (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+   swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
/*
 * Return the buffer to the free list by setting the corresponding
@@ -721,27 +719,16 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 size_t size, enum dma_data_direction dir,
 enum dma_sync_target target)
 {
-   int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-   size_t alloc_size = io_tlb_alloc_size[index];
-   phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-   if (orig_addr == INVALID_PHYS_ADDR)
-   return;
-
-	validate_sync_size_and_truncate(hwdev, alloc_size, &size);
-
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-   swiotlb_bounce(orig_addr, tlb_addr,
-  size, DMA_FROM_DEVICE);
+   swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))

[PATCH 02/14] swiotlb: remove the alloc_size parameter to swiotlb_tbl_unmap_single

2021-02-28 Thread Christoph Hellwig
Now that swiotlb remembers the allocation size there is no need to pass
it back to swiotlb_tbl_unmap_single.
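
This works because swiotlb records the allocation size per bounce slot;
the internal lookup (visible in the patch 03 diff above, using the
pre-patch-12 global names) is essentially:

	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	size_t alloc_size = io_tlb_alloc_size[index];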

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/dma-iommu.c | 11 +++---
 drivers/xen/swiotlb-xen.c |  4 ++--
 include/linux/swiotlb.h   |  1 -
 kernel/dma/direct.h   |  2 +-
 kernel/dma/swiotlb.c  | 45 ---
 5 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9ab6ee22c11088..da2bd8f0885e6e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -493,8 +493,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
 {
struct iommu_domain *domain = iommu_get_dma_domain(dev);
-   struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
phys_addr_t phys;
 
phys = iommu_iova_to_phys(domain, dma_addr);
@@ -504,8 +502,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size);
 
if (unlikely(is_swiotlb_buffer(phys)))
-   swiotlb_tbl_unmap_single(dev, phys, size,
-   iova_align(iovad, size), dir, attrs);
+   swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
 static bool dev_is_untrusted(struct device *dev)
@@ -580,10 +577,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
 
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-   if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
-   swiotlb_tbl_unmap_single(dev, phys, org_size,
-   aligned_size, dir, attrs);
-
+   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+   swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
 }
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99cb..d47f1b311caac0 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -406,7 +406,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 * Ensure that the address returned is DMA'ble
 */
if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-   swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+   swiotlb_tbl_unmap_single(dev, map, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
@@ -445,7 +445,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(hwdev, dev_addr))
-   swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+   swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 }
 
 static void
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5857a937c63722..59f421d041ed9e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -57,7 +57,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 phys_addr_t tlb_addr,
 size_t mapping_size,
-size_t alloc_size,
 enum dma_data_direction dir,
 unsigned long attrs);
 
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index b9861557873768..e1bf721591c0cf 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -114,6 +114,6 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
if (unlikely(is_swiotlb_buffer(phys)))
-   swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+   swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c10e855a03bc16..03aa614565e417 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -102,7 +102,7 @@ static phys_addr_t *io_tlb_orig_addr;
 /*
  * The mapped buffer's size should be validated during a sync operation.
  */
-static size_t *io_tlb_orig_size;
+static size_t *io_tlb_alloc_size;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -253,15 +253,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
  __func__, alloc_size, PAGE_SIZE);
 
alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t));
-   io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE);
-   if (!io_tlb_orig_size)
+	io_tlb_alloc_size = memblock_alloc(alloc_size, PAGE_SIZE);

swiotlb cleanups v2

2021-02-28 Thread Christoph Hellwig
Hi Konrad,

this series contains a bunch of swiotlb cleanups, mostly to reduce the
amount of internals exposed to code outside of swiotlb.c, which should
help prepare for supporting multiple different bounce buffer pools.

Changes since v1:
 - rebased to v5.12-rc1
 - a few more cleanups
 - merge and forward port the patch from Claire to move all the global
   variables into a struct to prepare for multiple instances


[PATCH 01/14] powerpc/svm: stop using io_tlb_start

2021-02-28 Thread Christoph Hellwig
Use the local variable that is passed to swiotlb_init_with_tbl for
freeing the memory in the failure case to isolate the code a little
better from swiotlb internals.

Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/platforms/pseries/svm.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 7b739cc7a8a93e..1d829e257996fb 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -55,9 +55,9 @@ void __init svm_swiotlb_init(void)
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
return;
 
-   if (io_tlb_start)
-   memblock_free_early(io_tlb_start,
-   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+
+   memblock_free_early(__pa(vstart),
+   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
panic("SVM: Cannot allocate SWIOTLB buffer");
 }
 
-- 
2.29.2



Re: [PATCH 6/7] dma-iommu: implement ->alloc_noncontiguous

2021-02-28 Thread Christoph Hellwig
On Mon, Mar 01, 2021 at 04:17:42PM +0900, Sergey Senozhatsky wrote:
> > > Do you think we could add the attrs parameter to the
> > > dma_alloc_noncontiguous() API?
> > 
> > Yes, we could probably do that.
> 
> I can cook a patch, unless somebody is already looking into it.

I plan to resend the whole series with the comments very soon.


Re: [PATCH 6/7] dma-iommu: implement ->alloc_noncontiguous

2021-02-28 Thread Sergey Senozhatsky
On (21/02/16 09:49), Christoph Hellwig wrote:
> > When working on the videobuf2 integration with Sergey I noticed that
> > we always pass 0 as DMA attrs here, which removes the ability for
> > drivers to use DMA_ATTR_ALLOC_SINGLE_PAGES.
> > 
> > It's quite important from a system stability point of view, because by
> > default the iommu_dma allocator would prefer big order allocations for
> > TLB locality reasons. For many devices, though, it doesn't really
> > affect the performance, because of random access patterns, so single
> > pages are good enough and reduce the risk of allocation failures or
> > latency due to fragmentation.
> > 
> > Do you think we could add the attrs parameter to the
> > dma_alloc_noncontiguous() API?
> 
> Yes, we could probably do that.

I can cook a patch, unless somebody is already looking into it.

-ss


Re: [PATCH] iommu/vt-d: Fix status code for Allocate/Free PASID command

2021-02-28 Thread Lu Baolu

On 2/27/21 3:39 PM, Zenghui Yu wrote:

As per Intel vt-d spec, Rev 3.0 (section 10.4.45 "Virtual Command Response
Register"), the status code of "No PASID available" error in response to
the Allocate PASID command is 2, not 1. The same for "Invalid PASID" error
in response to the Free PASID command.

We will otherwise see confusing kernel log under the command failure from
guest side. Fix it.

Fixes: 24f27d32ab6b ("iommu/vt-d: Enlightened PASID allocation")
Signed-off-by: Zenghui Yu 
---
  drivers/iommu/intel/pasid.h | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 97dfcffbf495..444c0bec221a 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -30,8 +30,8 @@
  #define VCMD_VRSP_IP  0x1
  #define VCMD_VRSP_SC(e)   (((e) >> 1) & 0x3)
  #define VCMD_VRSP_SC_SUCCESS  0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL1
-#define VCMD_VRSP_SC_INVALID_PASID 1
+#define VCMD_VRSP_SC_NO_PASID_AVAIL2
+#define VCMD_VRSP_SC_INVALID_PASID 2
  #define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
  #define VCMD_CMD_OPERAND(e)   ((e) << 8)
  /*
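
For illustration, how a raw Virtual Command Response Register value
decodes with these macros (the value and function are hypothetical):

static void vcmd_status_demo(u64 e)	/* e.g. e = 0x4 */
{
	/* IP (bit 0) clear; SC = (0x4 >> 1) & 0x3 = 2 */
	if (VCMD_VRSP_SC(e) == VCMD_VRSP_SC_NO_PASID_AVAIL)
		pr_err("vcmd alloc: no PASID available\n");
}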



Thanks a lot for catching this.

Acked-by: Lu Baolu 

Best regards,
baolu