Re: [PATCH v14 4/4] vhost-vdpa: Add support for vIOMMU.

2023-03-21 Thread Cindy Lu
On Tue, Mar 21, 2023 at 11:21 AM Jason Wang  wrote:
>
> On Tue, Mar 21, 2023 at 12:20 AM Cindy Lu  wrote:
> >
> > 1. The vIOMMU support will enable vDPA to work in IOMMU mode. This
> > will fix security issues while using the no-IOMMU mode.
> > To support this feature we need to add new functions for IOMMU MR adds and
> > deletes.
> >
> > Also since the SVQ does not support vIOMMU yet, add the check for IOMMU
> > in vhost_vdpa_dev_start, if the SVQ and IOMMU enable at the same time
> > the function will return fail.
> >
> > 2. Skip the iova_max check in vhost_vdpa_listener_skipped_section(). While
> > the MR is an IOMMU MR, move this check to vhost_vdpa_iommu_map_notify()
> >
> > Verified in vp_vdpa and vdpa_sim_net driver
> >
> > Signed-off-by: Cindy Lu 
> > ---
> >  hw/virtio/vhost-vdpa.c | 149 +++--
> >  include/hw/virtio/vhost-vdpa.h |  11 +++
> >  2 files changed, 152 insertions(+), 8 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 0c8c37e786..b36922b365 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -26,6 +26,7 @@
> >  #include "cpu.h"
> >  #include "trace.h"
> >  #include "qapi/error.h"
> > +#include "hw/virtio/virtio-access.h"
> >
> >  /*
> >   * Return one past the end of the end of section. Be careful with uint64_t
> > @@ -60,15 +61,22 @@ static bool 
> > vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> >   iova_min, section->offset_within_address_space);
> >  return true;
> >  }
> > +/*
> > + * While using vIOMMU, sometimes the section will be larger than 
> > iova_max,
> > + * but the memory that actually maps is smaller, so move the check to
> > + * function vhost_vdpa_iommu_map_notify(). That function will use the 
> > actual
> > + * size that maps to the kernel
> > + */
> >
> > -llend = vhost_vdpa_section_end(section);
> > -if (int128_gt(llend, int128_make64(iova_max))) {
> > -error_report("RAM section out of device range (max=0x%" PRIx64
> > - ", end addr=0x%" PRIx64 ")",
> > - iova_max, int128_get64(llend));
> > -return true;
> > +if (!memory_region_is_iommu(section->mr)) {
> > +llend = vhost_vdpa_section_end(section);
> > +if (int128_gt(llend, int128_make64(iova_max))) {
> > +error_report("RAM section out of device range (max=0x%" PRIx64
> > + ", end addr=0x%" PRIx64 ")",
> > + iova_max, int128_get64(llend));
> > +return true;
> > +}
> >  }
> > -
>
> Unnecessary changes.
>
will fix this
> >  return false;
> >  }
> >
> > @@ -185,6 +193,118 @@ static void vhost_vdpa_listener_commit(MemoryListener 
> > *listener)
> >  v->iotlb_batch_begin_sent = false;
> >  }
> >
> > +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry 
> > *iotlb)
> > +{
> > +struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
> > +
> > +hwaddr iova = iotlb->iova + iommu->iommu_offset;
> > +struct vhost_vdpa *v = iommu->dev;
> > +void *vaddr;
> > +int ret;
> > +Int128 llend;
> > +
> > +if (iotlb->target_as != &address_space_memory) {
> > +error_report("Wrong target AS \"%s\", only system memory is 
> > allowed",
> > + iotlb->target_as->name ? iotlb->target_as->name : 
> > "none");
> > +return;
> > +}
> > +RCU_READ_LOCK_GUARD();
> > +/* check if RAM section out of device range */
> > +llend = int128_add(int128_makes64(iotlb->addr_mask), 
> > int128_makes64(iova));
> > +if (int128_gt(llend, int128_make64(v->iova_range.last))) {
> > +error_report("RAM section out of device range (max=0x%" PRIx64
> > + ", end addr=0x%" PRIx64 ")",
> > + v->iova_range.last, int128_get64(llend));
> > +return;
> > +}
> > +
> > +vhost_vdpa_iotlb_batch_begin_once(v);
>
> Quoted from your answer in V1:
>
> "
> the VHOST_IOTLB_BATCH_END message was sent by
> vhost_vdpa_listener_commit, because we only use
> one vhost_vdpa_memory_listener and no-IOMMU mode will also need to use
> this listener, so we still need to add the batch begin here. Based on
> my testing, after the notify function was called, the listener_commit
> function was also called, so it works well in this situation
> "
>
> This assumes the map_notify to be called within the memory
> transactions which is not necessarily the case.
>
> I think it could be triggered when guest tries to establish a new
> mapping in the vIOMMU. In this case there's no memory transactions at
> all?
>
sure, thanks will fix this
> Thanks
>




Re: [PATCH v14 4/4] vhost-vdpa: Add support for vIOMMU.

2023-03-20 Thread Jason Wang
On Tue, Mar 21, 2023 at 12:20 AM Cindy Lu  wrote:
>
> 1. The vIOMMU support will enable vDPA to work in IOMMU mode. This
> will fix security issues while using the no-IOMMU mode.
> To support this feature we need to add new functions for IOMMU MR adds and
> deletes.
>
> Also since the SVQ does not support vIOMMU yet, add the check for IOMMU
> in vhost_vdpa_dev_start, if the SVQ and IOMMU enable at the same time
> the function will return fail.
>
> 2. Skip the iova_max check in vhost_vdpa_listener_skipped_section(). While
> the MR is an IOMMU MR, move this check to vhost_vdpa_iommu_map_notify()
>
> Verified in vp_vdpa and vdpa_sim_net driver
>
> Signed-off-by: Cindy Lu 
> ---
>  hw/virtio/vhost-vdpa.c | 149 +++--
>  include/hw/virtio/vhost-vdpa.h |  11 +++
>  2 files changed, 152 insertions(+), 8 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 0c8c37e786..b36922b365 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -26,6 +26,7 @@
>  #include "cpu.h"
>  #include "trace.h"
>  #include "qapi/error.h"
> +#include "hw/virtio/virtio-access.h"
>
>  /*
>   * Return one past the end of the end of section. Be careful with uint64_t
> @@ -60,15 +61,22 @@ static bool 
> vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
>   iova_min, section->offset_within_address_space);
>  return true;
>  }
> +/*
> + * While using vIOMMU, sometimes the section will be larger than 
> iova_max,
> + * but the memory that actually maps is smaller, so move the check to
> + * function vhost_vdpa_iommu_map_notify(). That function will use the 
> actual
> + * size that maps to the kernel
> + */
>
> -llend = vhost_vdpa_section_end(section);
> -if (int128_gt(llend, int128_make64(iova_max))) {
> -error_report("RAM section out of device range (max=0x%" PRIx64
> - ", end addr=0x%" PRIx64 ")",
> - iova_max, int128_get64(llend));
> -return true;
> +if (!memory_region_is_iommu(section->mr)) {
> +llend = vhost_vdpa_section_end(section);
> +if (int128_gt(llend, int128_make64(iova_max))) {
> +error_report("RAM section out of device range (max=0x%" PRIx64
> + ", end addr=0x%" PRIx64 ")",
> + iova_max, int128_get64(llend));
> +return true;
> +}
>  }
> -

Unnecessary changes.

>  return false;
>  }
>
> @@ -185,6 +193,118 @@ static void vhost_vdpa_listener_commit(MemoryListener 
> *listener)
>  v->iotlb_batch_begin_sent = false;
>  }
>
> +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry 
> *iotlb)
> +{
> +struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
> +
> +hwaddr iova = iotlb->iova + iommu->iommu_offset;
> +struct vhost_vdpa *v = iommu->dev;
> +void *vaddr;
> +int ret;
> +Int128 llend;
> +
> +if (iotlb->target_as != &address_space_memory) {
> +error_report("Wrong target AS \"%s\", only system memory is allowed",
> + iotlb->target_as->name ? iotlb->target_as->name : 
> "none");
> +return;
> +}
> +RCU_READ_LOCK_GUARD();
> +/* check if RAM section out of device range */
> +llend = int128_add(int128_makes64(iotlb->addr_mask), 
> int128_makes64(iova));
> +if (int128_gt(llend, int128_make64(v->iova_range.last))) {
> +error_report("RAM section out of device range (max=0x%" PRIx64
> + ", end addr=0x%" PRIx64 ")",
> + v->iova_range.last, int128_get64(llend));
> +return;
> +}
> +
> +vhost_vdpa_iotlb_batch_begin_once(v);

Quoted from your answer in V1:

"
the VHOST_IOTLB_BATCH_END message was sent by
vhost_vdpa_listener_commit, because we only use
one vhost_vdpa_memory_listener and no-IOMMU mode will also need to use
this listener, so we still need to add the batch begin here. Based on
my testing, after the notify function was called, the listener_commit
function was also called, so it works well in this situation
"

This assumes the map_notify to be called within the memory
transactions which is not necessarily the case.

I think it could be triggered when guest tries to establish a new
mapping in the vIOMMU. In this case there's no memory transactions at
all?

Thanks




[PATCH v14 4/4] vhost-vdpa: Add support for vIOMMU.

2023-03-20 Thread Cindy Lu
1. The vIOMMU support will enable vDPA to work in IOMMU mode. This
will fix security issues while using the no-IOMMU mode.
To support this feature we need to add new functions for IOMMU MR adds and
deletes.

Also since the SVQ does not support vIOMMU yet, add the check for IOMMU
in vhost_vdpa_dev_start, if the SVQ and IOMMU enable at the same time
the function will return fail.

2. Skip the iova_max check in vhost_vdpa_listener_skipped_section(). While
the MR is an IOMMU MR, move this check to vhost_vdpa_iommu_map_notify()

Verified in vp_vdpa and vdpa_sim_net driver

Signed-off-by: Cindy Lu 
---
 hw/virtio/vhost-vdpa.c | 149 +++--
 include/hw/virtio/vhost-vdpa.h |  11 +++
 2 files changed, 152 insertions(+), 8 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 0c8c37e786..b36922b365 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -26,6 +26,7 @@
 #include "cpu.h"
 #include "trace.h"
 #include "qapi/error.h"
+#include "hw/virtio/virtio-access.h"
 
 /*
  * Return one past the end of the end of section. Be careful with uint64_t
@@ -60,15 +61,22 @@ static bool 
vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
  iova_min, section->offset_within_address_space);
 return true;
 }
+/*
+ * While using vIOMMU, sometimes the section will be larger than iova_max,
+ * but the memory that actually maps is smaller, so move the check to
+ * function vhost_vdpa_iommu_map_notify(). That function will use the 
actual
+ * size that maps to the kernel
+ */
 
-llend = vhost_vdpa_section_end(section);
-if (int128_gt(llend, int128_make64(iova_max))) {
-error_report("RAM section out of device range (max=0x%" PRIx64
- ", end addr=0x%" PRIx64 ")",
- iova_max, int128_get64(llend));
-return true;
+if (!memory_region_is_iommu(section->mr)) {
+llend = vhost_vdpa_section_end(section);
+if (int128_gt(llend, int128_make64(iova_max))) {
+error_report("RAM section out of device range (max=0x%" PRIx64
+ ", end addr=0x%" PRIx64 ")",
+ iova_max, int128_get64(llend));
+return true;
+}
 }
-
 return false;
 }
 
@@ -185,6 +193,118 @@ static void vhost_vdpa_listener_commit(MemoryListener 
*listener)
 v->iotlb_batch_begin_sent = false;
 }
 
+static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
+
+hwaddr iova = iotlb->iova + iommu->iommu_offset;
+struct vhost_vdpa *v = iommu->dev;
+void *vaddr;
+int ret;
+Int128 llend;
+
+if (iotlb->target_as != &address_space_memory) {
+error_report("Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+return;
+}
+RCU_READ_LOCK_GUARD();
+/* check if RAM section out of device range */
+llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
+if (int128_gt(llend, int128_make64(v->iova_range.last))) {
+error_report("RAM section out of device range (max=0x%" PRIx64
+ ", end addr=0x%" PRIx64 ")",
+ v->iova_range.last, int128_get64(llend));
+return;
+}
+
+vhost_vdpa_iotlb_batch_begin_once(v);
+
+if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
+bool read_only;
+
+if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
+return;
+}
+
+ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ iotlb->addr_mask + 1, vaddr, read_only);
+if (ret) {
+error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
+ "0x%" HWADDR_PRIx ", %p) = %d (%m)",
+ v, iova, iotlb->addr_mask + 1, vaddr, ret);
+}
+} else {
+ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+   iotlb->addr_mask + 1);
+if (ret) {
+error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
+ "0x%" HWADDR_PRIx ") = %d (%m)",
+ v, iova, iotlb->addr_mask + 1, ret);
+}
+}
+}
+
+static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
+MemoryRegionSection *section)
+{
+struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+
+struct vdpa_iommu *iommu;
+Int128 end;
+int iommu_idx;
+IOMMUMemoryRegion *iommu_mr;
+int ret;
+
+iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+
+iommu = g_malloc0(sizeof(*iommu));
+end = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+end = int128_sub(end, int128_one());
+