Re: [PATCH RFC v1 13/15] iommu/virtio: Attach Arm PASID tables when available
On Fri, Mar 12, 2021 at 06:59:17PM +0530, Vivek Kumar Gautam wrote: > > > + /* XXX HACK: set feature bit ARM_SMMU_FEAT_2_LVL_CDTAB */ > > > + pst_cfg->vendor.cfg.feat_flag |= (1 << 1); > > > > Oh right, this flag is missing. I'll add > > > >#define VIRTIO_IOMMU_PST_ARM_SMMU3_F_CD2L (1ULL << 1) > > > > to the spec. > > Regarding this Eric pointed out [1] in my other patch about the scalability > of the approach where we keep adding flags in 'iommu_nesting_info' > corresponding to the arm-smmu-v3 capabilities. I guess the same goes to > these flags in virtio. > May be the 'iommu_nesting_info' can have a bitmap with the caps for vendor > specific features, and here we can add the related flags? Something like that, but I'd keep separate arch-specific structs. Vt-d reports the capability registers directly through iommu_nesting_info [2]. We could do the same for Arm, copy sanitized values of IDR0..5 into struct iommu_nesting_info_arm_smmuv3. I've avoided doing that for virtio-iommu because every field needs a description in the spec. So where possible I used generic properties that apply to any architecture, such as page, PASID and address size. What's left is the minimum arch-specific information to get nested translation going, leaving out a lot of properties such as big-endian and 32-bit, which can be added later if needed. The Arm specific properties are split into page table and pasid table information. Page table info should work for both SMMUv2 and v3 (where they correspond to an SMMU_IDRx field that constrains a context descriptor field.) I should move BTM in there since it's supported by SMMUv2. Thanks, Jean [2] https://lore.kernel.org/linux-iommu/20210302203545.436623-11-yi.l@intel.com/ ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH RFC v1 13/15] iommu/virtio: Attach Arm PASID tables when available
On 3/3/21 10:55 PM, Jean-Philippe Brucker wrote: On Fri, Jan 15, 2021 at 05:43:40PM +0530, Vivek Gautam wrote: [...] +static int viommu_setup_pgtable(struct viommu_endpoint *vdev, + struct viommu_domain *vdomain) +{ + int ret, id; + u32 asid; + enum io_pgtable_fmt fmt; + struct io_pgtable_ops *ops = NULL; + struct viommu_dev *viommu = vdev->viommu; + struct virtio_iommu_probe_table_format *desc = vdev->pgtf; + struct iommu_pasid_table *tbl = vdomain->pasid_tbl; + struct iommu_vendor_psdtable_cfg *pst_cfg; + struct arm_smmu_cfg_info *cfgi; + struct io_pgtable_cfg cfg = { + .iommu_dev = viommu->dev->parent, + .tlb= _flush_ops, + .pgsize_bitmap = vdev->pgsize_mask ? vdev->pgsize_mask : + vdomain->domain.pgsize_bitmap, + .ias= (vdev->input_end ? ilog2(vdev->input_end) : + ilog2(vdomain->domain.geometry.aperture_end)) + 1, + .oas= vdev->output_bits, + }; + + if (!desc) + return -EINVAL; + + if (!vdev->output_bits) + return -ENODEV; + + switch (le16_to_cpu(desc->format)) { + case VIRTIO_IOMMU_FOMRAT_PGTF_ARM_LPAE: + fmt = ARM_64_LPAE_S1; + break; + default: + dev_err(vdev->dev, "unsupported page table format 0x%x\n", + le16_to_cpu(desc->format)); + return -EINVAL; + } + + if (vdomain->mm.ops) { + /* +* TODO: attach additional endpoint to the domain. Check that +* the config is sane. +*/ + return -EEXIST; + } + + vdomain->mm.domain = vdomain; + ops = alloc_io_pgtable_ops(fmt, , >mm); + if (!ops) + return -ENOMEM; + + pst_cfg = >cfg; + cfgi = _cfg->vendor.cfg; + id = ida_simple_get(_ida, 1, 1 << desc->asid_bits, GFP_KERNEL); + if (id < 0) { + ret = id; + goto err_free_pgtable; + } + + asid = id; + ret = iommu_psdtable_prepare(tbl, pst_cfg, , asid); + if (ret) + goto err_free_asid; + + /* +* Strange to setup an op here? +* cd-lib is the actual user of sync op, and therefore the platform +* drivers should assign this sync/maintenance ops as per need. +*/ + tbl->ops->sync = viommu_flush_pasid; But this function deals with page tables, not pasid tables. 
As said on an earlier patch, the TLB flush ops should probably be passed during table registration - those ops are global so should really be const. Right, will amend it. + + /* Right now only PASID 0 supported ?? */ + ret = iommu_psdtable_write(tbl, pst_cfg, 0, >s1_cfg->cd); + if (ret) + goto err_free_asid; + + vdomain->mm.ops = ops; + dev_dbg(vdev->dev, "using page table format 0x%x\n", fmt); + + return 0; + +err_free_asid: + ida_simple_remove(_ida, asid); +err_free_pgtable: + free_io_pgtable_ops(ops); + return ret; +} + +static int viommu_config_arm_pst(struct iommu_vendor_psdtable_cfg *pst_cfg, +struct virtio_iommu_req_attach_pst_arm *req) +{ + struct arm_smmu_s1_cfg *s1_cfg = pst_cfg->vendor.cfg.s1_cfg; + + if (!s1_cfg) + return -ENODEV; + + req->format = cpu_to_le16(VIRTIO_IOMMU_FORMAT_PSTF_ARM_SMMU_V3); + req->s1fmt = s1_cfg->s1fmt; + req->s1dss = VIRTIO_IOMMU_PSTF_ARM_SMMU_V3_DSS_0; + req->s1contextptr = cpu_to_le64(pst_cfg->base); + req->s1cdmax = cpu_to_le32(s1_cfg->s1cdmax); + + return 0; +} + +static int viommu_config_pst(struct iommu_vendor_psdtable_cfg *pst_cfg, +void *req, enum pasid_table_fmt fmt) +{ + int ret; + + switch (fmt) { + case PASID_TABLE_ARM_SMMU_V3: + ret = viommu_config_arm_pst(pst_cfg, req); + break; + default: + ret = -EINVAL; + WARN_ON(1); + } + + return ret; +} + +static int viommu_prepare_arm_pst(struct viommu_endpoint *vdev, + struct iommu_vendor_psdtable_cfg *pst_cfg) +{ + struct virtio_iommu_probe_table_format *pgtf = vdev->pgtf; + struct arm_smmu_cfg_info *cfgi = _cfg->vendor.cfg; + struct arm_smmu_s1_cfg *cfg; + + /* Some sanity checks */ + if (pgtf->asid_bits != 8 && pgtf->asid_bits != 16) + return -EINVAL; No need for this, next patch checks asid size in viommu_config_arm_pgt() Right, thanks for catching. + + cfg = devm_kzalloc(pst_cfg->iommu_dev, sizeof(cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + cfgi->s1_cfg = cfg; + cfg->s1cdmax =
Re: [PATCH RFC v1 13/15] iommu/virtio: Attach Arm PASID tables when available
On Fri, Jan 15, 2021 at 05:43:40PM +0530, Vivek Gautam wrote: [...] > +static int viommu_setup_pgtable(struct viommu_endpoint *vdev, > + struct viommu_domain *vdomain) > +{ > + int ret, id; > + u32 asid; > + enum io_pgtable_fmt fmt; > + struct io_pgtable_ops *ops = NULL; > + struct viommu_dev *viommu = vdev->viommu; > + struct virtio_iommu_probe_table_format *desc = vdev->pgtf; > + struct iommu_pasid_table *tbl = vdomain->pasid_tbl; > + struct iommu_vendor_psdtable_cfg *pst_cfg; > + struct arm_smmu_cfg_info *cfgi; > + struct io_pgtable_cfg cfg = { > + .iommu_dev = viommu->dev->parent, > + .tlb= _flush_ops, > + .pgsize_bitmap = vdev->pgsize_mask ? vdev->pgsize_mask : > + vdomain->domain.pgsize_bitmap, > + .ias= (vdev->input_end ? ilog2(vdev->input_end) : > + > ilog2(vdomain->domain.geometry.aperture_end)) + 1, > + .oas= vdev->output_bits, > + }; > + > + if (!desc) > + return -EINVAL; > + > + if (!vdev->output_bits) > + return -ENODEV; > + > + switch (le16_to_cpu(desc->format)) { > + case VIRTIO_IOMMU_FOMRAT_PGTF_ARM_LPAE: > + fmt = ARM_64_LPAE_S1; > + break; > + default: > + dev_err(vdev->dev, "unsupported page table format 0x%x\n", > + le16_to_cpu(desc->format)); > + return -EINVAL; > + } > + > + if (vdomain->mm.ops) { > + /* > + * TODO: attach additional endpoint to the domain. Check that > + * the config is sane. > + */ > + return -EEXIST; > + } > + > + vdomain->mm.domain = vdomain; > + ops = alloc_io_pgtable_ops(fmt, , >mm); > + if (!ops) > + return -ENOMEM; > + > + pst_cfg = >cfg; > + cfgi = _cfg->vendor.cfg; > + id = ida_simple_get(_ida, 1, 1 << desc->asid_bits, GFP_KERNEL); > + if (id < 0) { > + ret = id; > + goto err_free_pgtable; > + } > + > + asid = id; > + ret = iommu_psdtable_prepare(tbl, pst_cfg, , asid); > + if (ret) > + goto err_free_asid; > + > + /* > + * Strange to setup an op here? > + * cd-lib is the actual user of sync op, and therefore the platform > + * drivers should assign this sync/maintenance ops as per need. 
> + */ > + tbl->ops->sync = viommu_flush_pasid; But this function deals with page tables, not pasid tables. As said on an earlier patch, the TLB flush ops should probably be passed during table registration - those ops are global so should really be const. > + > + /* Right now only PASID 0 supported ?? */ > + ret = iommu_psdtable_write(tbl, pst_cfg, 0, >s1_cfg->cd); > + if (ret) > + goto err_free_asid; > + > + vdomain->mm.ops = ops; > + dev_dbg(vdev->dev, "using page table format 0x%x\n", fmt); > + > + return 0; > + > +err_free_asid: > + ida_simple_remove(_ida, asid); > +err_free_pgtable: > + free_io_pgtable_ops(ops); > + return ret; > +} > + > +static int viommu_config_arm_pst(struct iommu_vendor_psdtable_cfg *pst_cfg, > + struct virtio_iommu_req_attach_pst_arm *req) > +{ > + struct arm_smmu_s1_cfg *s1_cfg = pst_cfg->vendor.cfg.s1_cfg; > + > + if (!s1_cfg) > + return -ENODEV; > + > + req->format = cpu_to_le16(VIRTIO_IOMMU_FORMAT_PSTF_ARM_SMMU_V3); > + req->s1fmt = s1_cfg->s1fmt; > + req->s1dss = VIRTIO_IOMMU_PSTF_ARM_SMMU_V3_DSS_0; > + req->s1contextptr = cpu_to_le64(pst_cfg->base); > + req->s1cdmax= cpu_to_le32(s1_cfg->s1cdmax); > + > + return 0; > +} > + > +static int viommu_config_pst(struct iommu_vendor_psdtable_cfg *pst_cfg, > + void *req, enum pasid_table_fmt fmt) > +{ > + int ret; > + > + switch (fmt) { > + case PASID_TABLE_ARM_SMMU_V3: > + ret = viommu_config_arm_pst(pst_cfg, req); > + break; > + default: > + ret = -EINVAL; > + WARN_ON(1); > + } > + > + return ret; > +} > + > +static int viommu_prepare_arm_pst(struct viommu_endpoint *vdev, > + struct iommu_vendor_psdtable_cfg *pst_cfg) > +{ > + struct virtio_iommu_probe_table_format *pgtf = vdev->pgtf; > + struct arm_smmu_cfg_info *cfgi = _cfg->vendor.cfg; > + struct arm_smmu_s1_cfg *cfg; > + > + /* Some sanity checks */ > + if (pgtf->asid_bits != 8 && pgtf->asid_bits != 16) > + return -EINVAL; No need for this, next patch checks asid size in viommu_config_arm_pgt() > + > + cfg = 
devm_kzalloc(pst_cfg->iommu_dev, sizeof(cfg), GFP_KERNEL); > + if (!cfg) > + return -ENOMEM; > + > + cfgi->s1_cfg = cfg; > + cfg->s1cdmax = vdev->pasid_bits; > + cfg->cd.asid =
[PATCH RFC v1 13/15] iommu/virtio: Attach Arm PASID tables when available
From: Jean-Philippe Brucker When the ARM PASID table format is reported in a probe, send an attach request and install the page tables for iommu_map/iommu_unmap use. Architecture-specific components are already abstracted to libraries. We just need to pass config bits around and setup an alternative mechanism to the mapping tree. We reuse the convention already adopted by other IOMMU architectures (ARM SMMU and AMD IOMMU), that entry 0 in the PASID table is reserved for non-PASID traffic. Bind the PASID table, and setup entry 0 to be modified with iommu_map/unmap. Signed-off-by: Jean-Philippe Brucker [Vivek: Bunch of refactoring and clean-ups to use iommu-pasid-table APIs, creating iommu_pasid_table, and configuring based on reported pasid format. Couple of additional methods have also been created to configure vendor specific pasid configuration] Signed-off-by: Vivek Gautam Cc: Joerg Roedel Cc: Will Deacon Cc: Michael S. Tsirkin Cc: Robin Murphy Cc: Jean-Philippe Brucker Cc: Eric Auger Cc: Alex Williamson Cc: Kevin Tian Cc: Jacob Pan Cc: Liu Yi L Cc: Lorenzo Pieralisi Cc: Shameerali Kolothum Thodi --- drivers/iommu/virtio-iommu.c | 314 +++ 1 file changed, 314 insertions(+) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 004ea94e3731..b5222da1dc74 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -25,6 +25,7 @@ #include #include +#include "iommu-pasid-table.h" #define MSI_IOVA_BASE 0x800 #define MSI_IOVA_LENGTH0x10 @@ -33,6 +34,9 @@ #define VIOMMU_EVENT_VQ1 #define VIOMMU_NR_VQS 2 +/* Some architectures need an Address Space ID for each page table */ +static DEFINE_IDA(asid_ida); + struct viommu_dev { struct iommu_device iommu; struct device *dev; @@ -55,6 +59,7 @@ struct viommu_dev { u32 probe_size; boolhas_map:1; + boolhas_table:1; }; struct viommu_mapping { @@ -76,6 +81,7 @@ struct viommu_domain { struct mutexmutex; /* protects viommu pointer */ unsigned intid; u32 map_flags; + struct 
iommu_pasid_table*pasid_tbl; /* Default address space when a table is bound */ struct viommu_mmmm; @@ -891,6 +897,285 @@ static int viommu_simple_attach(struct viommu_domain *vdomain, return ret; } +static int viommu_teardown_pgtable(struct viommu_domain *vdomain) +{ + struct iommu_vendor_psdtable_cfg *pst_cfg; + struct arm_smmu_cfg_info *cfgi; + u32 asid; + + if (!vdomain->mm.ops) + return 0; + + free_io_pgtable_ops(vdomain->mm.ops); + vdomain->mm.ops = NULL; + + if (vdomain->pasid_tbl) { + pst_cfg = >pasid_tbl->cfg; + cfgi = _cfg->vendor.cfg; + asid = cfgi->s1_cfg->cd.asid; + + iommu_psdtable_write(vdomain->pasid_tbl, pst_cfg, 0, NULL); + ida_simple_remove(_ida, asid); + } + + return 0; +} + +static int viommu_setup_pgtable(struct viommu_endpoint *vdev, + struct viommu_domain *vdomain) +{ + int ret, id; + u32 asid; + enum io_pgtable_fmt fmt; + struct io_pgtable_ops *ops = NULL; + struct viommu_dev *viommu = vdev->viommu; + struct virtio_iommu_probe_table_format *desc = vdev->pgtf; + struct iommu_pasid_table *tbl = vdomain->pasid_tbl; + struct iommu_vendor_psdtable_cfg *pst_cfg; + struct arm_smmu_cfg_info *cfgi; + struct io_pgtable_cfg cfg = { + .iommu_dev = viommu->dev->parent, + .tlb= _flush_ops, + .pgsize_bitmap = vdev->pgsize_mask ? vdev->pgsize_mask : + vdomain->domain.pgsize_bitmap, + .ias= (vdev->input_end ? ilog2(vdev->input_end) : + ilog2(vdomain->domain.geometry.aperture_end)) + 1, + .oas= vdev->output_bits, + }; + + if (!desc) + return -EINVAL; + + if (!vdev->output_bits) + return -ENODEV; + + switch (le16_to_cpu(desc->format)) { + case VIRTIO_IOMMU_FOMRAT_PGTF_ARM_LPAE: + fmt = ARM_64_LPAE_S1; + break; + default: + dev_err(vdev->dev, "unsupported page table format 0x%x\n", + le16_to_cpu(desc->format)); + return -EINVAL; + } + + if (vdomain->mm.ops) { + /* +* TODO: attach additional endpoint to the domain. Check that +* the config is sane. +*/ +