Re: [PATCH RFC v1 03/11] iommu/virtio: Handle incoming page faults

2021-10-11 Thread Vivek Kumar Gautam

Hi Jean,


On 10/11/21 2:46 PM, Jean-Philippe Brucker wrote:

Hi Vivek,

On Mon, Oct 11, 2021 at 01:41:15PM +0530, Vivek Gautam wrote:

+	list_for_each_entry(ep, &viommu->endpoints, list) {
+		if (ep->eid == endpoint) {
+			vdev = ep->vdev;


I have a question here though -
Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
per 'viommu_domain'?
If it is per 'viommu_domain' then the above list is also incorrect.
As you pointed out in patch [1] -
[PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
by viommu_dev
I am planning to add the endpoint IDs to a static global xarray in
viommu_probe_device() as below:

	vdev_for_each_id(i, eid, vdev) {
		ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
		if (ret)
			goto err_free_dev;
	}

and replace the above list traversal as below:

	xa_lock_irqsave(&viommu_ep_ids, flags);
	xa_for_each(&viommu_ep_ids, eid, vdev) {
		if (eid == endpoint) {
			ret = iommu_report_device_fault(vdev->dev, &fault_evt);
			if (ret)
				dev_err(vdev->dev,
					"Couldn't handle page request\n");
		}
	}
	xa_unlock_irqrestore(&viommu_ep_ids, flags);

But using a global xarray would also be incorrect if the endpoint IDs are
only unique per 'viommu_domain'.

I need to find the correct 'viommu_endpoint' to call iommu_report_device_fault()
with the correct device.


The endpoint IDs are only unique within a viommu_dev, so a global xarray
wouldn't work, but one in viommu_dev would. In vdomain it doesn't work
either, because we can't get to the domain from the fault handler without
first finding the endpoint.


Thanks. That's easy then. Will have an xarray in viommu_dev and iterate
over it from the fault handler.
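
For the record, a minimal sketch of that approach (the 'ep_ids' member
name and the error label here are illustrative, not from the patch):

	/* struct viommu_dev gains a per-instance xarray, xa_init()'d
	 * at probe time, e.g.: */
	struct xarray	ep_ids;	/* endpoint ID -> viommu_endpoint */

	/* In viommu_probe_device(), register each endpoint ID: */
	vdev_for_each_id(i, eid, vdev) {
		ret = xa_insert(&viommu->ep_ids, eid, vdev, GFP_KERNEL);
		if (ret)
			goto err_free_dev;
	}

	/* In the fault handler, the xarray is indexed by endpoint ID,
	 * so xa_load() can replace the iteration entirely: */
	vdev = xa_load(&viommu->ep_ids, endpoint);
	if (vdev && iommu_report_device_fault(vdev->dev, &fault_evt))
		dev_err(vdev->dev, "Couldn't handle page request\n");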


Best regards
Vivek


Re: [PATCH RFC v1 03/11] iommu/virtio: Handle incoming page faults

2021-10-11 Thread Jean-Philippe Brucker
Hi Vivek,

On Mon, Oct 11, 2021 at 01:41:15PM +0530, Vivek Gautam wrote:
> > > +	list_for_each_entry(ep, &viommu->endpoints, list) {
> > > +		if (ep->eid == endpoint) {
> > > +			vdev = ep->vdev;
> 
> I have a question here though -
> Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
> per 'viommu_domain'?
> If it is per 'viommu_domain' then the above list is also incorrect.
> As you pointed out in patch [1] -
> [PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
> by viommu_dev
> I am planning to add the endpoint IDs to a static global xarray in
> viommu_probe_device() as below:
> 
> 	vdev_for_each_id(i, eid, vdev) {
> 		ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
> 		if (ret)
> 			goto err_free_dev;
> 	}
> 
> and replace the above list traversal as below:
> 
> 	xa_lock_irqsave(&viommu_ep_ids, flags);
> 	xa_for_each(&viommu_ep_ids, eid, vdev) {
> 		if (eid == endpoint) {
> 			ret = iommu_report_device_fault(vdev->dev, &fault_evt);
> 			if (ret)
> 				dev_err(vdev->dev,
> 					"Couldn't handle page request\n");
> 		}
> 	}
> 	xa_unlock_irqrestore(&viommu_ep_ids, flags);
> 
> But using a global xarray would also be incorrect if the endpoint IDs are
> only unique per 'viommu_domain'.
> 
> I need to find the correct 'viommu_endpoint' to call
> iommu_report_device_fault() with the correct device.

The endpoint IDs are only unique within a viommu_dev, so a global xarray
wouldn't work, but one in viommu_dev would. In vdomain it doesn't work
either, because we can't get to the domain from the fault handler without
first finding the endpoint.

Thanks,
Jean



Re: [PATCH RFC v1 03/11] iommu/virtio: Handle incoming page faults

2021-10-11 Thread Vivek Gautam
Hi Jean,


On Tue, Sep 21, 2021 at 9:33 PM Jean-Philippe Brucker wrote:
>
> On Fri, Apr 23, 2021 at 03:21:39PM +0530, Vivek Gautam wrote:
> > Redirect the incoming page faults to the registered fault handler
> > that can take the fault information such as pasid, page request
> > group-id, address, and pasid flags.
> >
> > Signed-off-by: Vivek Gautam 
> > ---
> >  drivers/iommu/virtio-iommu.c  | 80 ++-
> >  include/uapi/linux/virtio_iommu.h |  1 +
> >  2 files changed, 80 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> > index c970f386f031..fd237cad1ce5 100644
> > --- a/drivers/iommu/virtio-iommu.c
> > +++ b/drivers/iommu/virtio-iommu.c
> > @@ -37,6 +37,13 @@
> >  /* Some architectures need an Address Space ID for each page table */
> >  DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
> >
> > +struct viommu_dev_pri_work {
> > + struct work_struct  work;
> > + struct viommu_dev   *dev;
> > + struct virtio_iommu_fault   *vfault;
> > + u32 endpoint;
> > +};
> > +
> >  struct viommu_dev {
> >   struct iommu_device iommu;
> >   struct device   *dev;
> > @@ -49,6 +56,8 @@ struct viommu_dev {
> >   struct list_headrequests;
> >   void*evts;
> >   struct list_headendpoints;
> > + struct workqueue_struct *pri_wq;
> > + struct viommu_dev_pri_work  *pri_work;
>
> IOPF already has a workqueue, so the driver doesn't need one.
> iommu_report_device_fault() should be fast enough to be called from the
> event handler.

Sure, will call iommu_report_device_fault() directly from
viommu_fault_handler().
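
Roughly, a sketch of the inlined reporting (assuming 'vdev' has already
been looked up by endpoint ID, and 'fault_evt' is built as in
viommu_handle_ppr()):

	if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
		/* Report inline; the IOPF layer queues the slow path
		 * itself, so no driver-private workqueue is needed. */
		if (iommu_report_device_fault(vdev->dev, &fault_evt))
			dev_err(vdev->dev, "Couldn't handle page request\n");
		return 0;
	}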

>
> >
> >   /* Device configuration */
> >   struct iommu_domain_geometrygeometry;
> > @@ -666,6 +675,58 @@ static int viommu_probe_endpoint(struct viommu_dev 
> > *viommu, struct device *dev)
> >   return ret;
> >  }
> >
> > +static void viommu_handle_ppr(struct work_struct *work)
> > +{
> > +	struct viommu_dev_pri_work *pwork =
> > +			container_of(work, struct viommu_dev_pri_work, work);
> > + struct viommu_dev *viommu = pwork->dev;
> > + struct virtio_iommu_fault *vfault = pwork->vfault;
> > + struct viommu_endpoint *vdev;
> > + struct viommu_ep_entry *ep;
> > + struct iommu_fault_event fault_evt = {
> > + .fault.type = IOMMU_FAULT_PAGE_REQ,
> > + };
> > +	struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
> > +
> > + u32 flags   = le32_to_cpu(vfault->flags);
> > + u32 prq_flags   = le32_to_cpu(vfault->pr_evt_flags);
> > + u32 endpoint= pwork->endpoint;
> > +
> > + memset(prq, 0, sizeof(struct iommu_fault_page_request));
>
> The fault_evt struct is already initialized

Right, I will remove this line.

>
> > + prq->addr = le64_to_cpu(vfault->address);
> > +
> > + if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
> > + prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
> > + if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
> > + prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> > + prq->pasid = le32_to_cpu(vfault->pasid);
> > + prq->grpid = le32_to_cpu(vfault->grpid);
> > + }
> > +
> > + if (flags & VIRTIO_IOMMU_FAULT_F_READ)
> > + prq->perm |= IOMMU_FAULT_PERM_READ;
> > + if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
> > + prq->perm |= IOMMU_FAULT_PERM_WRITE;
> > + if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
> > + prq->perm |= IOMMU_FAULT_PERM_EXEC;
> > + if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
> > + prq->perm |= IOMMU_FAULT_PERM_PRIV;
> > +
> > +	list_for_each_entry(ep, &viommu->endpoints, list) {
> > +		if (ep->eid == endpoint) {
> > +			vdev = ep->vdev;

I have a question here though -
Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
per 'viommu_domain'?
If it is per 'viommu_domain' then the above list is also incorrect.
As you pointed out in patch [1] -
[PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
by viommu_dev
I am planning to add the endpoint IDs to a static global xarray in
viommu_probe_device() as below:

	vdev_for_each_id(i, eid, vdev) {
		ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
		if (ret)
			goto err_free_dev;
	}

and replace the above list traversal as below:

	xa_lock_irqsave(&viommu_ep_ids, flags);
	xa_for_each(&viommu_ep_ids, eid, vdev) {
		if (eid == endpoint) {
			ret = iommu_report_device_fault(vdev->dev, &fault_evt);
			if (ret)
				dev_err(vdev->dev,
					"Couldn't handle page request\n");

Re: [PATCH RFC v1 03/11] iommu/virtio: Handle incoming page faults

2021-09-21 Thread Jean-Philippe Brucker
On Fri, Apr 23, 2021 at 03:21:39PM +0530, Vivek Gautam wrote:
> Redirect the incoming page faults to the registered fault handler
> that can take the fault information such as pasid, page request
> group-id, address, and pasid flags.
> 
> Signed-off-by: Vivek Gautam 
> ---
>  drivers/iommu/virtio-iommu.c  | 80 ++-
>  include/uapi/linux/virtio_iommu.h |  1 +
>  2 files changed, 80 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index c970f386f031..fd237cad1ce5 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -37,6 +37,13 @@
>  /* Some architectures need an Address Space ID for each page table */
>  DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
>  
> +struct viommu_dev_pri_work {
> + struct work_struct  work;
> + struct viommu_dev   *dev;
> + struct virtio_iommu_fault   *vfault;
> + u32 endpoint;
> +};
> +
>  struct viommu_dev {
>   struct iommu_device iommu;
>   struct device   *dev;
> @@ -49,6 +56,8 @@ struct viommu_dev {
>   struct list_headrequests;
>   void*evts;
>   struct list_headendpoints;
> + struct workqueue_struct *pri_wq;
> + struct viommu_dev_pri_work  *pri_work;

IOPF already has a workqueue, so the driver doesn't need one.
iommu_report_device_fault() should be fast enough to be called from the
event handler.

>  
>   /* Device configuration */
>   struct iommu_domain_geometrygeometry;
> @@ -666,6 +675,58 @@ static int viommu_probe_endpoint(struct viommu_dev 
> *viommu, struct device *dev)
>   return ret;
>  }
>  
> +static void viommu_handle_ppr(struct work_struct *work)
> +{
> +	struct viommu_dev_pri_work *pwork =
> +			container_of(work, struct viommu_dev_pri_work, work);
> + struct viommu_dev *viommu = pwork->dev;
> + struct virtio_iommu_fault *vfault = pwork->vfault;
> + struct viommu_endpoint *vdev;
> + struct viommu_ep_entry *ep;
> + struct iommu_fault_event fault_evt = {
> + .fault.type = IOMMU_FAULT_PAGE_REQ,
> + };
> +	struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
> +
> + u32 flags   = le32_to_cpu(vfault->flags);
> + u32 prq_flags   = le32_to_cpu(vfault->pr_evt_flags);
> + u32 endpoint= pwork->endpoint;
> +
> + memset(prq, 0, sizeof(struct iommu_fault_page_request));

The fault_evt struct is already initialized

> + prq->addr = le64_to_cpu(vfault->address);
> +
> + if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
> + prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
> + if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
> + prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> + prq->pasid = le32_to_cpu(vfault->pasid);
> + prq->grpid = le32_to_cpu(vfault->grpid);
> + }
> +
> + if (flags & VIRTIO_IOMMU_FAULT_F_READ)
> + prq->perm |= IOMMU_FAULT_PERM_READ;
> + if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
> + prq->perm |= IOMMU_FAULT_PERM_WRITE;
> + if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
> + prq->perm |= IOMMU_FAULT_PERM_EXEC;
> + if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
> + prq->perm |= IOMMU_FAULT_PERM_PRIV;
> +
> +	list_for_each_entry(ep, &viommu->endpoints, list) {
> +		if (ep->eid == endpoint) {
> +			vdev = ep->vdev;
> +			break;
> +		}
> +	}
> +
> + if ((prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) &&
> + (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_NEEDS_PASID))
> + prq->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
> +
> +	if (iommu_report_device_fault(vdev->dev, &fault_evt))
> +		dev_err(vdev->dev, "Couldn't handle page request\n");

An error likely means that nobody registered a fault handler, but we could
display a few more details about the fault that would help debug the
endpoint.
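
For instance, something like (a sketch, printing the fields this patch
already fills in):

	if (iommu_report_device_fault(vdev->dev, &fault_evt))
		dev_err_ratelimited(vdev->dev,
				    "Couldn't handle page request: addr 0x%llx pasid 0x%x grpid %u perm 0x%x\n",
				    prq->addr, prq->pasid, prq->grpid, prq->perm);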

> +}
> +
>  static int viommu_fault_handler(struct viommu_dev *viommu,
>   struct virtio_iommu_fault *fault)
>  {
> @@ -679,7 +740,13 @@ static int viommu_fault_handler(struct viommu_dev 
> *viommu,
>   u32 pasid   = le32_to_cpu(fault->pasid);
>  
>   if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
> - dev_info(viommu->dev, "Page request fault - unhandled\n");
> + dev_info_ratelimited(viommu->dev,
> +  "Page request fault from EP %u\n",
> +  endpoint);

That's rather for debugging the virtio-iommu driver, so it should be
dev_dbg() (or removed entirely).
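
i.e., at most something like (illustrative):

	dev_dbg(viommu->dev, "Page request fault from EP %u\n", endpoint);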

> +
> +	viommu->pri_work->vfault = fault;
> +	viommu->pri_work->endpoint = endpoint;
> +	queue_work(viommu->pri_wq, &viommu->pri_work->work);
>

[PATCH RFC v1 03/11] iommu/virtio: Handle incoming page faults

2021-04-23 Thread Vivek Gautam
Redirect the incoming page faults to the registered fault handler
that can take the fault information such as pasid, page request
group-id, address, and pasid flags.

Signed-off-by: Vivek Gautam 
---
 drivers/iommu/virtio-iommu.c  | 80 ++-
 include/uapi/linux/virtio_iommu.h |  1 +
 2 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index c970f386f031..fd237cad1ce5 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -37,6 +37,13 @@
 /* Some architectures need an Address Space ID for each page table */
 DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
 
+struct viommu_dev_pri_work {
+   struct work_struct  work;
+   struct viommu_dev   *dev;
+   struct virtio_iommu_fault   *vfault;
+   u32 endpoint;
+};
+
 struct viommu_dev {
struct iommu_device iommu;
struct device   *dev;
@@ -49,6 +56,8 @@ struct viommu_dev {
struct list_headrequests;
void*evts;
struct list_headendpoints;
+   struct workqueue_struct *pri_wq;
+   struct viommu_dev_pri_work  *pri_work;
 
/* Device configuration */
struct iommu_domain_geometrygeometry;
@@ -666,6 +675,58 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
return ret;
 }
 
+static void viommu_handle_ppr(struct work_struct *work)
+{
+	struct viommu_dev_pri_work *pwork =
+			container_of(work, struct viommu_dev_pri_work, work);
+   struct viommu_dev *viommu = pwork->dev;
+   struct virtio_iommu_fault *vfault = pwork->vfault;
+   struct viommu_endpoint *vdev;
+   struct viommu_ep_entry *ep;
+   struct iommu_fault_event fault_evt = {
+   .fault.type = IOMMU_FAULT_PAGE_REQ,
+   };
+	struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
+
+   u32 flags   = le32_to_cpu(vfault->flags);
+   u32 prq_flags   = le32_to_cpu(vfault->pr_evt_flags);
+   u32 endpoint= pwork->endpoint;
+
+   memset(prq, 0, sizeof(struct iommu_fault_page_request));
+   prq->addr = le64_to_cpu(vfault->address);
+
+   if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
+   prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+   if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
+   prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+   prq->pasid = le32_to_cpu(vfault->pasid);
+   prq->grpid = le32_to_cpu(vfault->grpid);
+   }
+
+   if (flags & VIRTIO_IOMMU_FAULT_F_READ)
+   prq->perm |= IOMMU_FAULT_PERM_READ;
+   if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
+   prq->perm |= IOMMU_FAULT_PERM_WRITE;
+   if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
+   prq->perm |= IOMMU_FAULT_PERM_EXEC;
+   if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
+   prq->perm |= IOMMU_FAULT_PERM_PRIV;
+
+	list_for_each_entry(ep, &viommu->endpoints, list) {
+   if (ep->eid == endpoint) {
+   vdev = ep->vdev;
+   break;
+   }
+   }
+
+   if ((prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) &&
+   (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_NEEDS_PASID))
+   prq->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+
+	if (iommu_report_device_fault(vdev->dev, &fault_evt))
+   dev_err(vdev->dev, "Couldn't handle page request\n");
+}
+
 static int viommu_fault_handler(struct viommu_dev *viommu,
struct virtio_iommu_fault *fault)
 {
@@ -679,7 +740,13 @@ static int viommu_fault_handler(struct viommu_dev *viommu,
u32 pasid   = le32_to_cpu(fault->pasid);
 
if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
-   dev_info(viommu->dev, "Page request fault - unhandled\n");
+   dev_info_ratelimited(viommu->dev,
+"Page request fault from EP %u\n",
+endpoint);
+
+   viommu->pri_work->vfault = fault;
+   viommu->pri_work->endpoint = endpoint;
+	queue_work(viommu->pri_wq, &viommu->pri_work->work);
return 0;
}
 
@@ -1683,6 +1750,17 @@ static int viommu_probe(struct virtio_device *vdev)
goto err_free_vqs;
}
 
+   viommu->pri_work = kzalloc(sizeof(*viommu->pri_work), GFP_KERNEL);
+   if (!viommu->pri_work)
+   return -ENOMEM;
+
+   viommu->pri_work->dev = viommu;
+
+   INIT_WORK(>pri_work->work, viommu_handle_ppr);
+   viommu->pri_wq = create_singlethread_workqueue("viommu-pri-wq");
+   if (!viommu->pri_wq)
+   return -ENOMEM;
+
viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ |