> From: Jan Beulich <[email protected]>
> Sent: Tuesday, August 24, 2021 10:18 PM
> 
> Generic code will use this information to determine what order values
> can legitimately be passed to the ->{,un}map_page() hooks. For now all
> ops structures simply get to announce 4k mappings (as base page size),
> and there is (and always has been) an assumption that this matches the
> CPU's MMU base page size (eventually we will want to permit IOMMUs with
> a base page size smaller than the CPU MMU's).
> 
> Signed-off-by: Jan Beulich <[email protected]>

Reviewed-by: Kevin Tian <[email protected]>

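As a side note for anyone trying the new field out: below is a minimal, stand-alone sketch (plain illustration, not Xen code; iommu_supports_order() and the constant values are my own assumptions) of how a generic caller could consult such a page-size bitmap to decide which order values may legitimately be passed to the ->{,un}map_page() hooks.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the Xen constants (4k base page). */
#define PAGE_SIZE    (1UL << 12)
#define PAGE_SIZE_4K (1UL << 12)

/* An order is usable iff the IOMMU advertises a page of size
 * PAGE_SIZE << order in its page_sizes bitmap. */
static bool iommu_supports_order(unsigned long page_sizes, unsigned int order)
{
    return page_sizes & (PAGE_SIZE << order);
}

int main(void)
{
    unsigned long page_sizes = PAGE_SIZE_4K;   /* what this patch announces */

    printf("order 0 (4k): %d\n", iommu_supports_order(page_sizes, 0)); /* 1 */
    printf("order 9 (2M): %d\n", iommu_supports_order(page_sizes, 9)); /* 0 */
    return 0;
}
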
> 
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -583,6 +583,7 @@ static void amd_dump_page_tables(struct
>  }
> 
>  static const struct iommu_ops __initconstrel _iommu_ops = {
> +    .page_sizes = PAGE_SIZE_4K,
>      .init = amd_iommu_domain_init,
>      .hwdom_init = amd_iommu_hwdom_init,
>      .quarantine_init = amd_iommu_quarantine_init,
> --- a/xen/drivers/passthrough/arm/ipmmu-vmsa.c
> +++ b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
> @@ -1298,6 +1298,7 @@ static void ipmmu_iommu_domain_teardown(
> 
>  static const struct iommu_ops ipmmu_iommu_ops =
>  {
> +    .page_sizes      = PAGE_SIZE_4K,
>      .init            = ipmmu_iommu_domain_init,
>      .hwdom_init      = ipmmu_iommu_hwdom_init,
>      .teardown        = ipmmu_iommu_domain_teardown,
> --- a/xen/drivers/passthrough/arm/smmu.c
> +++ b/xen/drivers/passthrough/arm/smmu.c
> @@ -2873,6 +2873,7 @@ static void arm_smmu_iommu_domain_teardo
>  }
> 
>  static const struct iommu_ops arm_smmu_iommu_ops = {
> +    .page_sizes = PAGE_SIZE_4K,
>      .init = arm_smmu_iommu_domain_init,
>      .hwdom_init = arm_smmu_iommu_hwdom_init,
>      .add_device = arm_smmu_dt_add_device_generic,
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -3426,7 +3426,8 @@ static void arm_smmu_iommu_xen_domain_te
>  }
> 
>  static const struct iommu_ops arm_smmu_iommu_ops = {
> -     .init           = arm_smmu_iommu_xen_domain_init,
> +     .page_sizes             = PAGE_SIZE_4K,
> +     .init                   = arm_smmu_iommu_xen_domain_init,
>       .hwdom_init             = arm_smmu_iommu_hwdom_init,
>       .teardown               = arm_smmu_iommu_xen_domain_teardown,
>       .iotlb_flush            = arm_smmu_iotlb_flush,
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -470,7 +470,17 @@ int __init iommu_setup(void)
> 
>      if ( iommu_enable )
>      {
> +        const struct iommu_ops *ops = NULL;
> +
>          rc = iommu_hardware_setup();
> +        if ( !rc )
> +            ops = iommu_get_ops();
> +        if ( ops && (ops->page_sizes & -ops->page_sizes) != PAGE_SIZE )
> +        {
> +            printk(XENLOG_ERR "IOMMU: page size mask %lx unsupported\n",
> +                   ops->page_sizes);
> +            rc = ops->page_sizes ? -EPERM : -ENODATA;
> +        }
>          iommu_enabled = (rc == 0);
>      }
> 
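
The (x & -x) expression in the hunk above isolates the lowest set bit of the mask, i.e. the smallest page size the IOMMU advertises, which for now must equal the CPU's PAGE_SIZE. A small stand-alone illustration of that check (the mask values are made up for demonstration):

#include <stdio.h>

#define PAGE_SIZE (1UL << 12)   /* 4k, as on the architectures touched here */

/* Mirror of the sanity check in iommu_setup(): the smallest advertised
 * page size (lowest set bit of the mask) must match the CPU page size. */
static int page_size_mask_ok(unsigned long page_sizes)
{
    return (page_sizes & -page_sizes) == PAGE_SIZE;
}

int main(void)
{
    printf("%d\n", page_size_mask_ok(1UL << 12));                 /* 1: 4k only */
    printf("%d\n", page_size_mask_ok((1UL << 12) | (1UL << 21))); /* 1: 4k + 2M */
    printf("%d\n", page_size_mask_ok(1UL << 21));                 /* 0: 2M only */
    printf("%d\n", page_size_mask_ok(0));                         /* 0: empty   */
    return 0;
}
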
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -2875,6 +2875,7 @@ static int __init intel_iommu_quarantine
>  }
> 
>  static struct iommu_ops __initdata vtd_ops = {
> +    .page_sizes = PAGE_SIZE_4K,
>      .init = intel_iommu_domain_init,
>      .hwdom_init = intel_iommu_hwdom_init,
>      .quarantine_init = intel_iommu_quarantine_init,
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -231,6 +231,7 @@ struct page_info;
>  typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);
> 
>  struct iommu_ops {
> +    unsigned long page_sizes;
>      int (*init)(struct domain *d);
>      void (*hwdom_init)(struct domain *d);
>      int (*quarantine_init)(struct domain *d);
