Re: [PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices

2021-08-09 Thread David Stevens
On Tue, Aug 10, 2021 at 10:19 AM Mi, Dapeng1  wrote:
>
> Hi David,
>
> I like this patch set; it is crucial for reducing the significant vIOMMU 
> performance overhead. It looks like you completely rewrote the IOMMU 
> mapping/unmapping path and use memory dynamically allocated from the 
> buddy system as the bounce buffer instead of the legacy SWIOTLB bounce 
> buffer. As far as I know, some legacy devices' DMA cannot access memory 
> above the 32-bit address space, while a dynamically allocated address 
> could exceed it. Is that a problem?

My understanding is that when devices with that sort of limitation sit
behind an IOMMU, the IOVA is what matters, not the physical address.
The bounce buffers use the same limits for IOVA allocation as the
regular dma-iommu path, so compatible IOVAs will be allocated for the
bounce buffers.
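
Roughly, the allocation path works like the following sketch (a
simplified illustration, not the literal kernel code; dma_get_mask(),
iova_shift(), iova_align() and alloc_iova_fast() are existing helpers,
while the function itself and its name are illustrative):

#include <linux/dma-mapping.h>
#include <linux/iova.h>

/*
 * Sketch: the IOVA handed to the device is clamped by the device's DMA
 * mask, independent of where the backing physical pages live. A device
 * limited to 32-bit DMA therefore gets an IOVA below 4 GiB even when
 * the bounce buffer pages themselves sit above it.
 */
static dma_addr_t sketch_alloc_iova(struct iova_domain *iovad,
				    size_t size, struct device *dev)
{
	u64 dma_limit = dma_get_mask(dev);	/* e.g. DMA_BIT_MASK(32) */
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn;

	if (dev->bus_dma_limit)
		dma_limit = min(dma_limit, (u64)dev->bus_dma_limit);

	/* alloc_iova_fast() never returns a PFN above dma_limit >> shift. */
	pfn = alloc_iova_fast(iovad, iova_align(iovad, size) >> shift,
			      dma_limit >> shift, true);
	if (!pfn)
		return 0;	/* allocation failure */

	return (dma_addr_t)pfn << shift;
}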

-David

> Thx,
> Dapeng Mi


RE: [PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices

2021-08-09 Thread Mi, Dapeng1
Hi David,

I like this patch set; it is crucial for reducing the significant vIOMMU 
performance overhead. It looks like you completely rewrote the IOMMU 
mapping/unmapping path and use memory dynamically allocated from the buddy 
system as the bounce buffer instead of the legacy SWIOTLB bounce buffer. As 
far as I know, some legacy devices' DMA cannot access memory above the 
32-bit address space, while a dynamically allocated address could exceed it. 
Is that a problem?

Thx,
Dapeng Mi

-----Original Message-----
From: iommu  On Behalf Of David Stevens
Sent: Friday, August 6, 2021 6:34 PM
To: Robin Murphy 
Cc: linux-ker...@vger.kernel.org; Sergey Senozhatsky ; iommu@lists.linux-foundation.org; David Stevens ; Will Deacon ; Christoph Hellwig 
Subject: [PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices

Re: [PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices

2021-08-06 Thread kernel test robot
Hi David,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on iommu/next]
[also build test WARNING on drm-intel/for-linux-next hch-configfs/for-next linus/master v5.14-rc4]
[cannot apply to next-20210805]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/David-Stevens/Add-dynamic-iommu-backed-bounce-buffers/20210806-183631
base:   https://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git next
config: ia64-randconfig-r025-20210804 (attached as .config)
compiler: ia64-linux-gcc (GCC) 10.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/c5f1f9fa88a7062c1ded50fa165f6b01ed73f161
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review David-Stevens/Add-dynamic-iommu-backed-bounce-buffers/20210806-183631
        git checkout c5f1f9fa88a7062c1ded50fa165f6b01ed73f161
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-10.3.0 make.cross ARCH=ia64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All warnings (new ones prefixed by >>):

>> drivers/iommu/io-buffer-manager.c:57:24: warning: no previous prototype for 'find_fallback_node' [-Wmissing-prototypes]
      57 | struct io_buffer_node *find_fallback_node(struct rb_root *root, dma_addr_t iova)
         |                        ^~~~~~~~~~~~~~~~~~
>> drivers/iommu/io-buffer-manager.c:75:6: warning: no previous prototype for 'insert_fallback_node' [-Wmissing-prototypes]
      75 | bool insert_fallback_node(struct rb_root *root, struct io_buffer_node *node)
         |      ^~~~~~~~~~~~~~~~~~~~


vim +/find_fallback_node +57 drivers/iommu/io-buffer-manager.c

    56  
  > 57  struct io_buffer_node *find_fallback_node(struct rb_root *root, dma_addr_t iova)
    58  {
    59          struct rb_node *node = root->rb_node;
    60  
    61          while (node) {
    62                  struct io_buffer_node *cur =
    63                          container_of(node, struct io_buffer_node, node);
    64  
    65                  if (iova < cur->info.iova)
    66                          node = node->rb_left;
    67                  else if (iova >= cur->info.iova + cur->info.size)
    68                          node = node->rb_right;
    69                  else
    70                          return cur;
    71          }
    72          return NULL;
    73  }
    74  
  > 75  bool insert_fallback_node(struct rb_root *root, struct io_buffer_node *node)
    76  {
    77          struct rb_node **new = &(root->rb_node), *parent = NULL;
    78          dma_addr_t node_end = node->info.iova + node->info.size;
    79  
    80          while (*new) {
    81                  struct io_buffer_node *cur =
    82                          container_of(*new, struct io_buffer_node, node);
    83                  dma_addr_t cur_end = cur->info.iova + cur->info.size;
    84  
    85                  parent = *new;
    86                  if (node_end <= cur->info.iova)
    87                          new = &((*new)->rb_left);
    88                  else if (node->info.iova >= cur_end)
    89                          new = &((*new)->rb_right);
    90                  else {
    91                          pr_crit("IOVA collision new=[%llx,%llx) old=[%llx,%llx)\n",
    92                                  node->info.iova, node_end, cur->info.iova,
    93                                  cur_end);
    94                          return false;
    95                  }
    96          }
    97  
    98          rb_link_node(&node->node, parent, new);
    99          rb_insert_color(&node->node, root);
   100          return true;
   101  }
   102  
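
A minimal way to silence both warnings would be to give the helpers
previous prototypes: either declare them in io-buffer-manager.h (if they
are meant to be shared with io-bounce-buffers.c) or mark them static (if
not). A sketch of the first option, with the signatures copied verbatim
from the warning output (the header placement is an assumption):

/* drivers/iommu/io-buffer-manager.h -- hypothetical placement */
struct io_buffer_node *find_fallback_node(struct rb_root *root,
                                          dma_addr_t iova);
bool insert_fallback_node(struct rb_root *root, struct io_buffer_node *node);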

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org



[PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices

2021-08-06 Thread David Stevens
From: David Stevens 

Add support for dynamic bounce buffers to the dma-api for use with
subgranule IOMMU mappings with untrusted devices. Bounce buffer
management is split into two parts. First, there is a buffer manager
that is responsible for allocating and tracking buffers. Second, there
is a layer that uses the managed buffers as bounce buffers. It is
responsible for managing the IOMMU mapping and for syncing between the
original and bounce buffers.

For now, buffer management is very simple - every mapping allocates a
new bounce buffer.
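
Schematically, the two layers cooperate as in the pseudocode below
(illustrative only: record_buffer() and sketch_iommu_map() are stand-in
names rather than interfaces from this patch; the real code lives in the
new files added below):

/* Illustrative map path: layer 1 allocates and tracks the buffer,
 * layer 2 owns the IOMMU mapping and the syncing. */
static dma_addr_t bounce_map_sketch(struct device *dev, phys_addr_t orig,
				    size_t size, enum dma_data_direction dir)
{
	struct page *bounce;
	dma_addr_t iova;

	/* Layer 1 (buffer manager): every mapping gets a fresh buffer. */
	bounce = alloc_pages(GFP_ATOMIC, get_order(size));
	if (!bounce)
		return DMA_MAPPING_ERROR;
	record_buffer(bounce, size);	/* hypothetical tracking call */

	/* Layer 2 (bounce layer): the IOMMU mapping points at the bounce
	 * pages, so an untrusted device never sees the original memory. */
	iova = sketch_iommu_map(dev, page_to_phys(bounce), size);

	/* Layer 2 also syncs: CPU data is copied into the bounce copy
	 * before the device reads it; device writes are copied back on
	 * the sync/unmap path. */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(page_address(bounce), phys_to_virt(orig), size);

	return iova;
}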

Signed-off-by: David Stevens 
---
 drivers/iommu/Makefile|   2 +-
 drivers/iommu/dma-iommu.c |  70 +-
 drivers/iommu/io-bounce-buffers.c | 358 ++
 drivers/iommu/io-bounce-buffers.h |  46 
 drivers/iommu/io-buffer-manager.c | 212 ++
 drivers/iommu/io-buffer-manager.h |  43 
 6 files changed, 728 insertions(+), 3 deletions(-)
 create mode 100644 drivers/iommu/io-bounce-buffers.c
 create mode 100644 drivers/iommu/io-bounce-buffers.h
 create mode 100644 drivers/iommu/io-buffer-manager.c
 create mode 100644 drivers/iommu/io-buffer-manager.h

diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c0fb0ba88143..4edaf7adc082 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
 obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
-obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
+obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o io-bounce-buffers.o io-buffer-manager.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 055ccda5eba1..908eb6fb7dc3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -24,6 +24,8 @@
 #include 
 #include 
 
+#include "io-bounce-buffers.h"
+
 struct iommu_dma_msi_page {
 	struct list_head	list;
 	dma_addr_t		iova;
@@ -44,6 +46,7 @@ struct iommu_dma_cookie {
 		dma_addr_t		msi_iova;
 	};
 	struct list_head	msi_page_list;
+	struct io_bounce_buffers	*bounce_buffers;
 
 	/* Domain for flush queue callback; NULL if flush queue not in use */
 	struct iommu_domain	*fq_domain;
@@ -81,6 +84,14 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
return PAGE_SIZE;
 }
 
+static struct io_bounce_buffers *dev_to_io_bounce_buffers(struct device *dev)
+{
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+   return cookie->bounce_buffers;
+}
+
 static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 {
struct iommu_dma_cookie *cookie;
@@ -160,6 +171,9 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
 
+   if (cookie->bounce_buffers)
+   io_bounce_buffers_destroy(cookie->bounce_buffers);
+
if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
put_iova_domain(&cookie->iovad);
 
@@ -333,6 +347,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
+   int ret;
 
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -380,7 +395,16 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!dev)
return 0;
 
-   return iova_reserve_iommu_regions(dev, domain);
+   ret = iova_reserve_iommu_regions(dev, domain);
+
+   if (ret == 0 && dev_is_untrusted(dev)) {
+   cookie->bounce_buffers =
+   io_bounce_buffers_init(dev, domain, iovad);
+   if (IS_ERR(cookie->bounce_buffers))
+   ret = PTR_ERR(cookie->bounce_buffers);
+   }
+
+   return ret;
 }
 
 /**
@@ -710,8 +734,13 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
+   struct io_bounce_buffers *bounce = dev_to_io_bounce_buffers(dev);
phys_addr_t phys;
 
+   if (bounce && io_bounce_buffers_sync_single(bounce, dma_handle,
+   size, dir, true))
+   return;
+
if (dev_is_dma_coherent(dev))
return;
 
@@ -722,8 +751,13 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 static void iommu_dma_sync_single_for_device(struct de