Re: [RFC PATCH 09/24] vdpa: multiple address spaces support

2020-10-08 Thread Jason Wang



On 2020/10/1 9:23 PM, Eli Cohen wrote:
  
+	/* Only support 1 address space */
+	if (vdpa->ngroups != 1)
+		return -ENOTSUPP;

Checkpatch warning:  prefer EOPNOTSUPP



Will fix.
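
For reference, a minimal sketch of the corrected check, assuming only
the error code changes and the check itself stays as posted:

	/* Only support 1 address space */
	if (vdpa->ngroups != 1)
		return -EOPNOTSUPP;	/* was -ENOTSUPP */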

Thanks



Re: [RFC PATCH 09/24] vdpa: multiple address spaces support

2020-10-08 Thread Jason Wang



On 2020/10/1 9:21 PM, Eli Cohen wrote:

On Thu, Sep 24, 2020 at 11:21:10AM +0800, Jason Wang wrote:

This patch introduces multiple address space support for vDPA
devices. The idea is to identify a specific address space via a
dedicated identifier - ASID.

During vDPA device allocation, the vDPA device driver needs to report
the number of address spaces supported by the device, and the DMA
mapping ops of the vDPA device need to be extended to support ASID.

This helps to isolate the DMA among the virtqueues. E.g. in the case
of virtio-net, the control virtqueue will not be assigned directly to
the guest.

This RFC patch only converts devices that want their own IOMMU/DMA
translation logic, so it rejects devices with more than one address
space that depend on the platform IOMMU. The plan is to

This is not apparent from the code. Instead, you enforce the number of
groups to be 1.



Yes, will fix.
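
Purely as an illustration (not code from the posted series), the check
could instead gate on devices that rely on the platform IOMMU, which is
what the commit message describes:

	const struct vdpa_config_ops *ops = vdpa->config;

	/*
	 * A device that relies on the platform IOMMU (it provides
	 * neither dma_map nor set_map) cannot expose more than one
	 * address space yet.
	 */
	if (!ops->dma_map && !ops->set_map && vdpa->nas != 1)
		return -EOPNOTSUPP;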





move all the DMA mapping logic to the vDPA device driver instead of
doing it in vhost-vDPA (otherwise it could result in a very
complicated API, and vhost-vDPA doesn't actually care about how the
composition/emulation is done in the device driver).

Signed-off-by: Jason Wang 
---
  drivers/vdpa/ifcvf/ifcvf_main.c   |  2 +-
  drivers/vdpa/mlx5/net/mlx5_vnet.c |  5 +++--
  drivers/vdpa/vdpa.c   |  4 +++-
  drivers/vdpa/vdpa_sim/vdpa_sim.c  | 10 ++
  drivers/vhost/vdpa.c  | 14 +-
  include/linux/vdpa.h  | 23 ---
  6 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e6a0be374e51..86cdf5f8bcae 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -440,7 +440,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct 
pci_device_id *id)
  
 	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
 				    dev, &ifc_vdpa_ops,
-				    IFCVF_MAX_QUEUE_PAIRS * 2, 1);
+				    IFCVF_MAX_QUEUE_PAIRS * 2, 1, 1);
  
 	if (adapter == NULL) {
 		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c 
b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 4e480f4f754e..db7404e121bf 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1788,7 +1788,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device 
*vdev)
return mvdev->generation;
  }
  
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+			     struct vhost_iotlb *iotlb)
  {
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1931,7 +1932,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
  
 	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-				 2 * mlx5_vdpa_max_qps(max_vqs), 1);
+				 2 * mlx5_vdpa_max_qps(max_vqs), 1, 1);
if (IS_ERR(ndev))
return ndev;
  
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 46399746ec7c..05195fa7865d 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -63,6 +63,7 @@ static void vdpa_release_dev(struct device *d)
   * @config: the bus operations that is supported by this device
   * @nvqs: number of virtqueues supported by this device
   * @ngroups: number of groups supported by this device
+ * @nas: number of address spaces supported by this device
   * @size: size of the parent structure that contains private data
   *
   * Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -74,7 +75,7 @@ static void vdpa_release_dev(struct device *d)
  struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
int nvqs, unsigned int ngroups,
-   size_t size)
+   unsigned int nas, size_t size)
  {
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -102,6 +103,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device 
*parent,
vdev->features_valid = false;
vdev->nvqs = nvqs;
vdev->ngroups = ngroups;
+   vdev->nas = nas;
  
 	err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
 	if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6669c561bc6e..5dc04ec271bb 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -354,7 +354,7 @@ static struct vdpasim *vdpasim_create(void)
	ops = &vdpasim_net_config_ops;
  
  	vdpasim = vdpa_alloc_device(struct 

Re: [RFC PATCH 09/24] vdpa: multiple address spaces support

2020-10-01 Thread Eli Cohen
On Thu, Sep 24, 2020 at 11:21:10AM +0800, Jason Wang wrote:
> This patch introduces multiple address space support for vDPA
> devices. The idea is to identify a specific address space via a
> dedicated identifier - ASID.
> 
> During vDPA device allocation, the vDPA device driver needs to report
> the number of address spaces supported by the device, and the DMA
> mapping ops of the vDPA device need to be extended to support ASID.
> 
> This helps to isolate the DMA among the virtqueues. E.g. in the case
> of virtio-net, the control virtqueue will not be assigned directly to
> the guest.
> 
> This RFC patch only converts devices that want their own IOMMU/DMA
> translation logic, so it rejects devices with more than one address
> space that depend on the platform IOMMU. The plan is to move all the
> DMA mapping logic to the vDPA device driver instead of doing it in
> vhost-vDPA (otherwise it could result in a very complicated API, and
> vhost-vDPA doesn't actually care about how the composition/emulation
> is done in the device driver).
> 
> Signed-off-by: Jason Wang 
> ---
>  drivers/vdpa/ifcvf/ifcvf_main.c   |  2 +-
>  drivers/vdpa/mlx5/net/mlx5_vnet.c |  5 +++--
>  drivers/vdpa/vdpa.c   |  4 +++-
>  drivers/vdpa/vdpa_sim/vdpa_sim.c  | 10 ++
>  drivers/vhost/vdpa.c  | 14 +-
>  include/linux/vdpa.h  | 23 ---
>  6 files changed, 38 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index e6a0be374e51..86cdf5f8bcae 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -440,7 +440,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct 
> pci_device_id *id)
>  
>   adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
>   dev, &ifc_vdpa_ops,
> - IFCVF_MAX_QUEUE_PAIRS * 2, 1);
> + IFCVF_MAX_QUEUE_PAIRS * 2, 1, 1);
>  
>   if (adapter == NULL) {
>   IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c 
> b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 4e480f4f754e..db7404e121bf 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1788,7 +1788,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device 
> *vdev)
>   return mvdev->generation;
>  }
>  
> -static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb 
> *iotlb)
> +static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
> +  struct vhost_iotlb *iotlb)
>  {
>   struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>   struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> @@ -1931,7 +1932,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
>   max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
>  
>   ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, 
> mdev->device, &mlx5_vdpa_ops,
> -  2 * mlx5_vdpa_max_qps(max_vqs), 1);
> +  2 * mlx5_vdpa_max_qps(max_vqs), 1, 1);
>   if (IS_ERR(ndev))
>   return ndev;
>  
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index 46399746ec7c..05195fa7865d 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -63,6 +63,7 @@ static void vdpa_release_dev(struct device *d)
>   * @config: the bus operations that is supported by this device
>   * @nvqs: number of virtqueues supported by this device
>   * @ngroups: number of groups supported by this device
> + * @nas: number of address spaces supported by this device
>   * @size: size of the parent structure that contains private data
>   *
>   * Driver should use vdpa_alloc_device() wrapper macro instead of
> @@ -74,7 +75,7 @@ static void vdpa_release_dev(struct device *d)
>  struct vdpa_device *__vdpa_alloc_device(struct device *parent,
>   const struct vdpa_config_ops *config,
>   int nvqs, unsigned int ngroups,
> - size_t size)
> + unsigned int nas, size_t size)
>  {
>   struct vdpa_device *vdev;
>   int err = -EINVAL;
> @@ -102,6 +103,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device 
> *parent,
>   vdev->features_valid = false;
>   vdev->nvqs = nvqs;
>   vdev->ngroups = ngroups;
> + vdev->nas = nas;
>  
>   err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
>   if (err)
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c 
> b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index 6669c561bc6e..5dc04ec271bb 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -354,7 +354,7 @@ static struct vdpasim *vdpasim_create(void)
>   ops = &vdpasim_net_config_ops;
>  
>   vdpasim = 

Re: [RFC PATCH 09/24] vdpa: multiple address spaces support

2020-10-01 Thread Eli Cohen
On Thu, Sep 24, 2020 at 11:21:10AM +0800, Jason Wang wrote:
> This patch introduces multiple address space support for vDPA
> devices. The idea is to identify a specific address space via a
> dedicated identifier - ASID.
> 
> During vDPA device allocation, the vDPA device driver needs to report
> the number of address spaces supported by the device, and the DMA
> mapping ops of the vDPA device need to be extended to support ASID.
> 
> This helps to isolate the DMA among the virtqueues. E.g. in the case
> of virtio-net, the control virtqueue will not be assigned directly to
> the guest.
> 
> This RFC patch only converts devices that want their own IOMMU/DMA
> translation logic, so it rejects devices with more than one address
> space that depend on the platform IOMMU. The plan is to

This is not apparent from the code. Instead, you enforce the number of
groups to be 1.

> move all the DMA mapping logic to the vDPA device driver instead of
> doing it in vhost-vDPA (otherwise it could result in a very
> complicated API, and vhost-vDPA doesn't actually care about how the
> composition/emulation is done in the device driver).
> 
> Signed-off-by: Jason Wang 
> ---
>  drivers/vdpa/ifcvf/ifcvf_main.c   |  2 +-
>  drivers/vdpa/mlx5/net/mlx5_vnet.c |  5 +++--
>  drivers/vdpa/vdpa.c   |  4 +++-
>  drivers/vdpa/vdpa_sim/vdpa_sim.c  | 10 ++
>  drivers/vhost/vdpa.c  | 14 +-
>  include/linux/vdpa.h  | 23 ---
>  6 files changed, 38 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index e6a0be374e51..86cdf5f8bcae 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -440,7 +440,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct 
> pci_device_id *id)
>  
>   adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
>   dev, &ifc_vdpa_ops,
> - IFCVF_MAX_QUEUE_PAIRS * 2, 1);
> + IFCVF_MAX_QUEUE_PAIRS * 2, 1, 1);
>  
>   if (adapter == NULL) {
>   IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c 
> b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 4e480f4f754e..db7404e121bf 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1788,7 +1788,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device 
> *vdev)
>   return mvdev->generation;
>  }
>  
> -static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb 
> *iotlb)
> +static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
> +  struct vhost_iotlb *iotlb)
>  {
>   struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>   struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> @@ -1931,7 +1932,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
>   max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
>  
>   ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, 
> mdev->device, &mlx5_vdpa_ops,
> -  2 * mlx5_vdpa_max_qps(max_vqs), 1);
> +  2 * mlx5_vdpa_max_qps(max_vqs), 1, 1);
>   if (IS_ERR(ndev))
>   return ndev;
>  
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index 46399746ec7c..05195fa7865d 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -63,6 +63,7 @@ static void vdpa_release_dev(struct device *d)
>   * @config: the bus operations that is supported by this device
>   * @nvqs: number of virtqueues supported by this device
>   * @ngroups: number of groups supported by this device
> + * @nas: number of address spaces supported by this device
>   * @size: size of the parent structure that contains private data
>   *
>   * Driver should use vdpa_alloc_device() wrapper macro instead of
> @@ -74,7 +75,7 @@ static void vdpa_release_dev(struct device *d)
>  struct vdpa_device *__vdpa_alloc_device(struct device *parent,
>   const struct vdpa_config_ops *config,
>   int nvqs, unsigned int ngroups,
> - size_t size)
> + unsigned int nas, size_t size)
>  {
>   struct vdpa_device *vdev;
>   int err = -EINVAL;
> @@ -102,6 +103,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device 
> *parent,
>   vdev->features_valid = false;
>   vdev->nvqs = nvqs;
>   vdev->ngroups = ngroups;
> + vdev->nas = nas;
>  
>   err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
>   if (err)
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c 
> b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index 6669c561bc6e..5dc04ec271bb 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -354,7 +354,7 @@ static struct vdpasim 

[RFC PATCH 09/24] vdpa: multiple address spaces support

2020-09-23 Thread Jason Wang
This patch introduces multiple address space support for vDPA
devices. The idea is to identify a specific address space via a
dedicated identifier - ASID.

During vDPA device allocation, the vDPA device driver needs to report
the number of address spaces supported by the device, and the DMA
mapping ops of the vDPA device need to be extended to support ASID.

This helps to isolate the DMA among the virtqueues. E.g. in the case
of virtio-net, the control virtqueue will not be assigned directly to
the guest.

This RFC patch only converts devices that want their own IOMMU/DMA
translation logic, so it rejects devices with more than one address
space that depend on the platform IOMMU. The plan is to move all the
DMA mapping logic to the vDPA device driver instead of doing it in
vhost-vDPA (otherwise it could result in a very complicated API, and
vhost-vDPA doesn't actually care about how the composition/emulation
is done in the device driver).
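
The include/linux/vdpa.h hunk is cut off in this archive. A plausible
shape for the ASID-aware DMA mapping ops, inferred from the
mlx5_vdpa_set_map() change below (the exact posted signatures may
differ):

	/* Inferred sketch of the ASID-aware ops in vdpa_config_ops */
	int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
		       struct vhost_iotlb *iotlb);
	int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
		       u64 iova, u64 size, u64 pa, u32 perm);
	int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
			 u64 iova, u64 size);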

Signed-off-by: Jason Wang 
---
 drivers/vdpa/ifcvf/ifcvf_main.c   |  2 +-
 drivers/vdpa/mlx5/net/mlx5_vnet.c |  5 +++--
 drivers/vdpa/vdpa.c   |  4 +++-
 drivers/vdpa/vdpa_sim/vdpa_sim.c  | 10 ++
 drivers/vhost/vdpa.c  | 14 +-
 include/linux/vdpa.h  | 23 ---
 6 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e6a0be374e51..86cdf5f8bcae 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -440,7 +440,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct 
pci_device_id *id)
 
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops,
-   IFCVF_MAX_QUEUE_PAIRS * 2, 1);
+   IFCVF_MAX_QUEUE_PAIRS * 2, 1, 1);
 
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c 
b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 4e480f4f754e..db7404e121bf 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1788,7 +1788,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device 
*vdev)
return mvdev->generation;
 }
 
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+			     struct vhost_iotlb *iotlb)
 {
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1931,7 +1932,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
 
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, 
mdev->device, &mlx5_vdpa_ops,
-2 * mlx5_vdpa_max_qps(max_vqs), 1);
+2 * mlx5_vdpa_max_qps(max_vqs), 1, 1);
if (IS_ERR(ndev))
return ndev;
 
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 46399746ec7c..05195fa7865d 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -63,6 +63,7 @@ static void vdpa_release_dev(struct device *d)
  * @config: the bus operations that is supported by this device
  * @nvqs: number of virtqueues supported by this device
  * @ngroups: number of groups supported by this device
+ * @nas: number of address spaces supported by this device
  * @size: size of the parent structure that contains private data
  *
  * Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -74,7 +75,7 @@ static void vdpa_release_dev(struct device *d)
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
int nvqs, unsigned int ngroups,
-   size_t size)
+   unsigned int nas, size_t size)
 {
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -102,6 +103,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device 
*parent,
vdev->features_valid = false;
vdev->nvqs = nvqs;
vdev->ngroups = ngroups;
+   vdev->nas = nas;
 
	err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6669c561bc6e..5dc04ec271bb 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -354,7 +354,7 @@ static struct vdpasim *vdpasim_create(void)
	ops = &vdpasim_net_config_ops;
 
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
-   VDPASIM_VQ_NUM, 1);
+   VDPASIM_VQ_NUM, 1, 1);
if (!vdpasim)
goto err_alloc;
 
@@ -581,7 +581,7
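
The archive truncates the remaining hunks (vdpa_sim, drivers/vhost/vdpa.c
and include/linux/vdpa.h). A rough, simplified sketch of how the
vhost-vdpa side might forward the new ASID argument, modeled on the
mainline vhost_vdpa_map() helper; the fixed ASID 0 and the omission of
map batching are assumptions here, not the exact posted code:

	static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
				  u64 size, u64 pa, u32 perm)
	{
		struct vhost_dev *dev = &v->vdev;
		struct vdpa_device *vdpa = v->vdpa;
		const struct vdpa_config_ops *ops = vdpa->config;
		int r;

		/* Track the mapping in the vhost IOTLB first */
		r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
					  pa, perm);
		if (r)
			return r;

		if (ops->dma_map)
			/* Device does its own translation: forward ASID 0 */
			r = ops->dma_map(vdpa, 0, iova, size, pa, perm);
		else if (ops->set_map)
			r = ops->set_map(vdpa, 0, dev->iotlb);
		else
			/* Otherwise fall back to the platform IOMMU */
			r = iommu_map(v->domain, iova, pa, size,
				      perm_to_iommu_flags(perm));

		return r;
	}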