On Fri, Apr 23, 2021 at 03:21:41PM +0530, Vivek Gautam wrote:
> Add a feature flag to virtio iommu for Shared virtual addressing
> (SVA). This feature would indicate the availability of a path for handling
> device page faults, and the provision for sending page response.
In this case the feature should probably be called PAGE_REQUEST or
similar. SVA aggregates PF + PASID + shared page tables
Thanks,
Jean
> Also add necessary methods to enable and disable SVA so that the
> masters can enable the SVA path. This also requires enabling the
> PRI capability on the device.
>
> Signed-off-by: Vivek Gautam
> ---
> drivers/iommu/virtio-iommu.c | 268 ++
> include/uapi/linux/virtio_iommu.h | 1 +
> 2 files changed, 269 insertions(+)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 3da5f0807711..250c137a211b 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -18,6 +18,7 @@
> #include
> #include
> #include
> +#include <linux/pci-ats.h>
> #include
> #include
> #include
> @@ -26,6 +27,7 @@
>
> #include
> #include "iommu-pasid-table.h"
> +#include "iommu-sva-lib.h"
>
> #define MSI_IOVA_BASE			0x8000000
> #define MSI_IOVA_LENGTH			0x100000
> @@ -37,6 +39,9 @@
> /* Some architectures need an Address Space ID for each page table */
> DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
>
> +static DEFINE_MUTEX(sva_lock);
> +static DEFINE_MUTEX(iopf_lock);
> +
> struct viommu_dev_pri_work {
> struct work_struct work;
> struct viommu_dev *dev;
> @@ -71,6 +76,7 @@ struct viommu_dev {
>
> 	bool				has_map:1;
> 	bool				has_table:1;
> +	bool				has_sva:1;
> };
>
> struct viommu_mapping {
> @@ -124,6 +130,12 @@ struct viommu_endpoint {
> 	void				*pstf;
> 	/* Preferred page table format */
> 	void				*pgtf;
> +
> +	/* sva */
> +	bool				ats_supported;
> +	bool				pri_supported;
> +	bool				sva_enabled;
> +	bool				iopf_enabled;
> };
>
> struct viommu_ep_entry {
> @@ -582,6 +594,64 @@ static int viommu_add_pstf(struct viommu_endpoint *vdev,
> void *pstf, size_t len)
> return 0;
> }
>
> +static int viommu_init_ats_pri(struct viommu_endpoint *vdev)
> +{
> + struct device *dev = vdev->dev;
> + struct pci_dev *pdev = to_pci_dev(dev);
> +
> + if (!dev_is_pci(vdev->dev))
> + return -EINVAL;
> +
> + if (pci_ats_supported(pdev))
> + vdev->ats_supported = true;
> +
> + if (pci_pri_supported(pdev))
> + vdev->pri_supported = true;
> +
> + return 0;
> +}
> +
> +static int viommu_enable_pri(struct viommu_endpoint *vdev)
> +{
> + int ret;
> + struct pci_dev *pdev;
> +
> + /* Let's allow only 4 requests for PRI right now */
> + size_t max_inflight_pprs = 4;
> +
> + if (!vdev->pri_supported || !vdev->ats_supported)
> + return -ENODEV;
> +
> + pdev = to_pci_dev(vdev->dev);
> +
> + ret = pci_reset_pri(pdev);
> + if (ret)
> + return ret;
> +
> + ret = pci_enable_pri(pdev, max_inflight_pprs);
> + if (ret) {
> + dev_err(vdev->dev, "cannot enable PRI: %d\n", ret);
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static void viommu_disable_pri(struct viommu_endpoint *vdev)
> +{
> + struct pci_dev *pdev;
> +
> + if (!dev_is_pci(vdev->dev))
> + return;
> +
> + pdev = to_pci_dev(vdev->dev);
> +
> + if (!pdev->pri_enabled)
> + return;
> +
> + pci_disable_pri(pdev);
> +}
> +
> static int viommu_init_queues(struct viommu_dev *viommu)
> {
> viommu->iopf_pri = iopf_queue_alloc(dev_name(viommu->dev));
> @@ -684,6 +754,10 @@ static int viommu_probe_endpoint(struct viommu_dev
> *viommu, struct device *dev)
> if (ret)
> goto out_free_eps;
>
> + ret = viommu_init_ats_pri(vdev);
> + if (ret)
> + goto out_free_eps;
> +
> kfree(probe);
> return 0;
>
> @@ -1681,6 +1755,194 @@ static int viommu_of_xlate(struct device *dev, struct
> of_phandle_args *args)
> return iommu_fwspec_add_ids(dev, args->args, 1);
> }
>
> +static bool viommu_endpoint_iopf_supported(struct viommu_endpoint *vdev)
> +{
> + /* TODO: support Stall model later */
> + return vdev->pri_supported;
> +}
> +
> +bool viommu_endpoint_sva_supported(struct viommu_endpoint *vdev)
> +{
> + struct viommu_dev *viommu = vdev->viommu;
> +
> + if (!viommu->has_sva)
> + return false;
> +
> + return vdev->pasid_bits;
> +}
> +
> +bool viommu_endpoint_sva_enabled(struct viommu_endpoint *vdev)
> +{
> + bool enabled;
> +
> +	mutex_lock(&sva_lock);
> +