Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Christoph Hellwig
FYI, this is what I'd do relative to the patch on the dma-ranges
branch.  In fact realizing this makes me want to refactor things a bit
so that the new code can entirely live in the dma-direct code, but please
test this first:


diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c21893f683b585..072fc42349874d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -35,21 +35,16 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 #ifndef __arch_pfn_to_dma
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-   if (dev) {
-   phys_addr_t paddr = PFN_PHYS(pfn);
-
-   pfn -= PFN_DOWN(dma_offset_from_phys_addr(dev, paddr));
-   }
-   return (dma_addr_t)__pfn_to_bus(pfn);
+   if (!dev)
+   return (dma_addr_t)__pfn_to_bus(pfn);
+   return translate_phys_to_dma(dev, PFN_PHYS(pfn));
 }
 
 static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-   unsigned long pfn = __bus_to_pfn(addr);
-
-   if (dev)
-   pfn += PFN_DOWN(dma_offset_from_dma_addr(dev, addr));
-   return pfn;
+   if (!dev)
+   return __bus_to_pfn(addr);
+   return PFN_DOWN(translate_dma_to_phys(dev, addr));
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 7831ca5b1b5dd6..e624171c4962ad 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -19,12 +19,16 @@ extern unsigned int zone_dma_bits;
 #else
 static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-   return (dma_addr_t)paddr - dma_offset_from_phys_addr(dev, paddr);
+   if (dev->dma_range_map)
+   return translate_phys_to_dma(dev, paddr);
+   return (dma_addr_t)paddr;
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
-   return (phys_addr_t)dev_addr + dma_offset_from_dma_addr(dev, dev_addr);
+   if (dev->dma_range_map)
+   return translate_dma_to_phys(dev, dma_addr);
+   return (phys_addr_t)dma_addr;
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4c4646761afee4..3b1ceebb6f2ad5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -199,29 +199,28 @@ struct bus_dma_region {
 };
 
 #ifdef CONFIG_HAS_DMA
-static inline u64 dma_offset_from_dma_addr(struct device *dev,
-   dma_addr_t dma_addr)
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+   phys_addr_t paddr)
 {
-   const struct bus_dma_region *m = dev->dma_range_map;
+   const struct bus_dma_region *m;
 
-   if (m)
-   for (; m->size; m++)
-   if (dma_addr >= m->dma_start &&
-   dma_addr - m->dma_start < m->size)
-   return m->offset;
-   return 0;
+   for (m = dev->dma_range_map; m->size; m++)
+   if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
+   return (dma_addr_t)paddr - m->offset;
+
+   /* make sure dma_capable fails when no translation is available */
+   return DMA_MAPPING_ERROR; 
 }
 
-static inline u64 dma_offset_from_phys_addr(struct device *dev,
-   phys_addr_t paddr)
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+   dma_addr_t dma_addr)
 {
-   const struct bus_dma_region *m = dev->dma_range_map;
+   const struct bus_dma_region *m;
+
+   for (m = dev->dma_range_map; m->size; m++)
+   if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
+   return (phys_addr_t)dma_addr + m->offset;
 
-   if (m)
-   for (; m->size; m++)
-   if (paddr >= m->cpu_start &&
-   paddr - m->cpu_start < m->size)
-   return m->offset;
return 0;
 }
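
For illustration, here is a minimal sketch (not part of the patch; the
values and the "demo_map" name are invented) of how a one-entry range map
drives the two helpers above, given that .offset works out to
cpu_start - dma_start in the code shown:

	/* RAM the CPU sees at 0x80000000 appears to the device at 0x0. */
	static const struct bus_dma_region demo_map[] = {
		{
			.cpu_start = 0x80000000,
			.dma_start = 0x00000000,
			.size      = 0x40000000,	/* 1 GiB window */
			.offset    = 0x80000000,	/* cpu_start - dma_start */
		},
		{ }	/* zero .size terminates the map */
	};

	/* With dev->dma_range_map = demo_map:
	 *   translate_phys_to_dma(dev, 0x80001000) == 0x00001000
	 *   translate_dma_to_phys(dev, 0x00001000) == 0x80001000
	 * A physical address outside the window returns DMA_MAPPING_ERROR,
	 * which is intended to make dma_capable() fail (see the follow-ups
	 * below in this thread).
	 */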
 


Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Christoph Hellwig
On Tue, Sep 08, 2020 at 09:29:35AM +0200, Christoph Hellwig wrote:
> FYI, this is what I'd do relative to the patch on the dma-ranges
> branch.  In fact realizing this makes me want to refactor things a bit
> so that the new code can entirely live in the dma-direct code, but please
> test this first:

And of course this isn't going to work for arm devices without any
range, so let's try this instead:

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c21893f683b585..e913e04d2be8b9 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -35,21 +35,20 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 #ifndef __arch_pfn_to_dma
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-   if (dev) {
-   phys_addr_t paddr = PFN_PHYS(pfn);
-
-   pfn -= PFN_DOWN(dma_offset_from_phys_addr(dev, paddr));
-   }
-   return (dma_addr_t)__pfn_to_bus(pfn);
+   if (!dev)
+   return (dma_addr_t)__pfn_to_bus(pfn);
+   if (dev->dma_range_map)
+   return translate_phys_to_dma(dev, PFN_PHYS(pfn));
+   return (dma_addr_t)PFN_PHYS(pfn);
 }
 
 static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-   unsigned long pfn = __bus_to_pfn(addr);
-
-   if (dev)
-   pfn += PFN_DOWN(dma_offset_from_dma_addr(dev, addr));
-   return pfn;
+   if (!dev)
+   return __bus_to_pfn(addr);
+   if (dev->dma_range_map)
+   return PFN_DOWN(translate_dma_to_phys(dev, addr));
+   return PFN_DOWN(addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 7831ca5b1b5dd6..e624171c4962ad 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -19,12 +19,16 @@ extern unsigned int zone_dma_bits;
 #else
 static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-   return (dma_addr_t)paddr - dma_offset_from_phys_addr(dev, paddr);
+   if (dev->dma_range_map)
+   return translate_phys_to_dma(dev, paddr);
+   return (dma_addr_t)paddr;
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
-   return (phys_addr_t)dev_addr + dma_offset_from_dma_addr(dev, dev_addr);
+   if (dev->dma_range_map)
+   return translate_dma_to_phys(dev, dma_addr);
+   return (phys_addr_t)dma_addr;
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4c4646761afee4..3b1ceebb6f2ad5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -199,29 +199,28 @@ struct bus_dma_region {
 };
 
 #ifdef CONFIG_HAS_DMA
-static inline u64 dma_offset_from_dma_addr(struct device *dev,
-   dma_addr_t dma_addr)
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+   phys_addr_t paddr)
 {
-   const struct bus_dma_region *m = dev->dma_range_map;
+   const struct bus_dma_region *m;
 
-   if (m)
-   for (; m->size; m++)
-   if (dma_addr >= m->dma_start &&
-   dma_addr - m->dma_start < m->size)
-   return m->offset;
-   return 0;
+   for (m = dev->dma_range_map; m->size; m++)
+   if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
+   return (dma_addr_t)paddr - m->offset;
+
+   /* make sure dma_capable fails when no translation is available */
+   return DMA_MAPPING_ERROR; 
 }
 
-static inline u64 dma_offset_from_phys_addr(struct device *dev,
-   phys_addr_t paddr)
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+   dma_addr_t dma_addr)
 {
-   const struct bus_dma_region *m = dev->dma_range_map;
+   const struct bus_dma_region *m;
+
+   for (m = dev->dma_range_map; m->size; m++)
+   if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
+   return (phys_addr_t)dma_addr + m->offset;
 
-   if (m)
-   for (; m->size; m++)
-   if (paddr >= m->cpu_start &&
-   paddr - m->cpu_start < m->size)
-   return m->offset;
return 0;
 }
 


Re: [PATCH RESEND v9 08/13] iommu/arm-smmu-v3: Share process page tables

2020-09-08 Thread Auger Eric
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> With Shared Virtual Addressing (SVA), we need to mirror CPU TTBR, TCR,
> MAIR and ASIDs in SMMU contexts. Each SMMU has a single ASID space split
> into two sets, shared and private. Shared ASIDs correspond to those
> obtained from the arch ASID allocator, and private ASIDs are used for
> "classic" map/unmap DMA.
> 
> A possible conflict happens when trying to use a shared ASID that has
> already been allocated for private use by the SMMU driver. This will be
> addressed in a later patch by replacing the private ASID. At the
> moment we return -EBUSY.
> 
> Each mm_struct shared with the SMMU will have a single context
> descriptor. Add a refcount to keep track of this. It will be protected
> by the global SVA lock.
> 
> Introduce a new arm-smmu-v3-sva.c file and the CONFIG_ARM_SMMU_V3_SVA
> option to let users opt in SVA support.
> 
> Signed-off-by: Jean-Philippe Brucker 
> ---
> v9: Move to arm-smmu-v3-sva.c
> ---
>  drivers/iommu/Kconfig |  10 ++
>  drivers/iommu/arm/arm-smmu-v3/Makefile|   5 +-
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   |   8 ++
>  .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c   | 123 ++
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   |  34 -
>  5 files changed, 172 insertions(+), 8 deletions(-)
>  create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> 
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index fb1787377eb6..b1d592cd9984 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -313,6 +313,16 @@ config ARM_SMMU_V3
> Say Y here if your system includes an IOMMU device implementing
> the ARM SMMUv3 architecture.
>  
> +config ARM_SMMU_V3_SVA
> + bool "Shared Virtual Addressing support for the ARM SMMUv3"
> + depends on ARM_SMMU_V3
> + help
> +   Support for sharing process address spaces with devices using the
> +   SMMUv3.
> +
> +   Say Y here if your system supports SVA extensions such as PCIe PASID
> +   and PRI.
> +
>  config S390_IOMMU
>   def_bool y if S390 && PCI
>   depends on S390 && PCI
> diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
> index 569e24e9f162..54feb1ecccad 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/Makefile
> +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
> @@ -1,2 +1,5 @@
>  # SPDX-License-Identifier: GPL-2.0
> -obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
> +obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
> +arm_smmu_v3-objs-y += arm-smmu-v3.o
> +arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
> +arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> index 51a9ce07b2d6..6b06a6f19604 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -540,6 +540,9 @@ struct arm_smmu_ctx_desc {
>   u64 ttbr;
>   u64 tcr;
>   u64 mair;
> +
> + refcount_t  refs;
> + struct mm_struct*mm;
>  };
>  
>  struct arm_smmu_l1_ctx_desc {
> @@ -672,4 +675,9 @@ struct arm_smmu_domain {
>   spinlock_t  devices_lock;
>  };
>  
> +extern struct xarray arm_smmu_asid_xa;
> +extern struct mutex arm_smmu_asid_lock;
> +
> +bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
> +
>  #endif /* _ARM_SMMU_V3_H */
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> new file mode 100644
> index ..7a4f40565e06
> --- /dev/null
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> @@ -0,0 +1,123 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Implementation of the IOMMU SVA API for the ARM SMMUv3
> + */
> +
> +#include 
> +#include 
> +#include 
> +
> +#include "arm-smmu-v3.h"
> +#include "../../io-pgtable-arm.h"
> +
> +static struct arm_smmu_ctx_desc *
> +arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
> +{
> + struct arm_smmu_ctx_desc *cd;
> +
> + cd = xa_load(&arm_smmu_asid_xa, asid);
> + if (!cd)
> + return NULL;
> +
> + if (cd->mm) {
> + if (WARN_ON(cd->mm != mm))
> + return ERR_PTR(-EINVAL);
> + /* All devices bound to this mm use the same cd struct. */
> + refcount_inc(&cd->refs);
> + return cd;
> + }
> +
> + /* Ouch, ASID is already in use for a private cd. */
> + return ERR_PTR(-EBUSY);
> +}
> +
> +__maybe_unused
> +static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct 
> *mm)
> +{
> + u16 asid;
> + int err = 0;
> + u64 tcr, par, reg;
> + struct arm_smmu_ctx_desc *cd;
> + struct arm_smmu_ctx_desc *ret = NULL;
> +
> + asid = arm64_mm_context_get(mm);
> + if (!as

Re: [PATCH RESEND v9 03/13] iommu/sva: Add PASID helpers

2020-09-08 Thread Auger Eric
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Let IOMMU drivers allocate a single PASID per mm. Store the mm in the
> IOASID set to allow refcounting and searching mm by PASID, when handling
> an I/O page fault.
> 
> Reviewed-by: Lu Baolu 
> Signed-off-by: Jean-Philippe Brucker 
> ---
>  drivers/iommu/Kconfig |  5 +++
>  drivers/iommu/Makefile|  1 +
>  drivers/iommu/iommu-sva-lib.h | 15 +++
>  drivers/iommu/iommu-sva-lib.c | 85 +++
>  4 files changed, 106 insertions(+)
>  create mode 100644 drivers/iommu/iommu-sva-lib.h
>  create mode 100644 drivers/iommu/iommu-sva-lib.c
> 
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bef5d75e306b..fb1787377eb6 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -103,6 +103,11 @@ config IOMMU_DMA
>   select IRQ_MSI_IOMMU
>   select NEED_SG_DMA_LENGTH
>  
> +# Shared Virtual Addressing library
> +config IOMMU_SVA_LIB
> + bool
> + select IOASID
> +
>  config FSL_PAMU
>   bool "Freescale IOMMU support"
>   depends on PCI
> diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> index 11f1771104f3..61bd30cd8369 100644
> --- a/drivers/iommu/Makefile
> +++ b/drivers/iommu/Makefile
> @@ -27,3 +27,4 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
>  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
>  obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
>  obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
> +obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o
> diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
> new file mode 100644
> index ..b40990aef3fd
> --- /dev/null
> +++ b/drivers/iommu/iommu-sva-lib.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * SVA library for IOMMU drivers
> + */
> +#ifndef _IOMMU_SVA_LIB_H
> +#define _IOMMU_SVA_LIB_H
> +
> +#include 
> +#include 
> +
> +int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
> +void iommu_sva_free_pasid(struct mm_struct *mm);
> +struct mm_struct *iommu_sva_find(ioasid_t pasid);
> +
> +#endif /* _IOMMU_SVA_LIB_H */
> diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
> new file mode 100644
> index ..db7e6c104d6b
> --- /dev/null
> +++ b/drivers/iommu/iommu-sva-lib.c
> @@ -0,0 +1,85 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Helpers for IOMMU drivers implementing SVA
> + */
> +#include 
> +#include 
> +
> +#include "iommu-sva-lib.h"
> +
> +static DEFINE_MUTEX(iommu_sva_lock);
> +static DECLARE_IOASID_SET(iommu_sva_pasid);
> +
> +/**
> + * iommu_sva_alloc_pasid - Allocate a PASID for the mm
> + * @mm: the mm
> + * @min: minimum PASID value (inclusive)
> + * @max: maximum PASID value (inclusive)
> + *
> + * Try to allocate a PASID for this mm, or take a reference to the existing 
> one
> + * provided it fits within the [min, max] range. On success the PASID is
> + * available in mm->pasid, and must be released with iommu_sva_free_pasid().
> + *
> + * Returns 0 on success and < 0 on error.
> + */
> +int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
> +{
> + int ret = 0;
> + ioasid_t pasid;
> +
> + if (min == INVALID_IOASID || max == INVALID_IOASID ||
> + min == 0 || max < min)
you may add a comment explaining why min == 0 is forbidden.
> + return -EINVAL;
> +
> + mutex_lock(&iommu_sva_lock);
> + if (mm->pasid) {
> + if (mm->pasid >= min && mm->pasid <= max)
> + ioasid_get(mm->pasid);
> + else
> + ret = -EOVERFLOW;
> + } else {
> + pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
> + if (pasid == INVALID_IOASID)
> + ret = -ENOMEM;
> + else
> + mm->pasid = pasid;
> + }
> + mutex_unlock(&iommu_sva_lock);
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
> +
> +/**
> + * iommu_sva_free_pasid - Release the mm's PASID
> + * @mm: the mm.
> + *
> + * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
> + */
> +void iommu_sva_free_pasid(struct mm_struct *mm)
> +{
> + mutex_lock(&iommu_sva_lock);
> + if (ioasid_put(mm->pasid))
> + mm->pasid = 0;
ditto: 0 versus INVALID_IOASID
> + mutex_unlock(&iommu_sva_lock);
> +}
> +EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
> +
> +/* ioasid wants a void * argument */
shouldn't it be:
ioasid_find getter() requires a void *arg?
> +static bool __mmget_not_zero(void *mm)
> +{
> + return mmget_not_zero(mm);
> +}
> +
> +/**
> + * iommu_sva_find() - Find mm associated to the given PASID
> + * @pasid: Process Address Space ID assigned to the mm
> + *
> + * On success a reference to the mm is taken, and must be released with 
> mmput().
> + *
> + * Returns the mm corresponding to this PASID, or an error if not found.
> + */
> +struct mm_struct *iommu_sva_find(ioasid_t pasid)
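
As a rough usage sketch (not from the patch; "master", "fault", the PASID
range, and the program_/handle_ helpers are all invented names), an IOMMU
driver would combine these helpers like so:

	/* Bind: allocate (or take a reference to) the mm's PASID.
	 * min is 1 because mm->pasid == 0 means "no PASID allocated". */
	ret = iommu_sva_alloc_pasid(mm, 1, (1 << 20) - 1);
	if (ret)
		return ret;
	program_pasid_table(master, mm->pasid, mm);	/* invented helper */

	/* I/O page fault path: look the mm up from the faulting PASID. */
	struct mm_struct *fault_mm = iommu_sva_find(fault->pasid);
	if (!IS_ERR_OR_NULL(fault_mm)) {
		handle_iopf(fault_mm, fault);		/* invented helper */
		mmput(fault_mm);	/* iommu_sva_find() took a reference */
	}

	/* Unbind: drop the reference taken at bind time. */
	iommu_sva_free_pasid(mm);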

Re: [patch V2 00/46] x86, PCI, XEN, genirq ...: Prepare for device MSI

2020-09-08 Thread Russ Anderson
On Wed, Aug 26, 2020 at 01:16:28PM +0200, Thomas Gleixner wrote:
> This is the second version of providing a base to support device MSI (non
> PCI based) and on top of that support for IMS (Interrupt Message Storm)
> based devices in a halfways architecture independent way.

Booted with quick testing on a 32 socket, 1536 CPU, 12 TB memory
Cascade Lake system and an 8 socket, 144 CPU, 3 TB memory
Cooper Lake system without any obvious regression.


-- 
Russ Anderson,  SuperDome Flex Linux Kernel Group Manager
HPE - Hewlett Packard Enterprise (formerly SGI)  r...@hpe.com


Re: [PATCH V2 5/5] DO NOT MERGE: iommu: disable list appending in dma-iommu

2020-09-08 Thread Lu Baolu

On 2020/9/8 14:23, Christoph Hellwig wrote:
> On Tue, Sep 08, 2020 at 02:04:53PM +0800, Lu Baolu wrote:
>> Do you mind telling me where I can find Marek's series?
>
> [PATCH v10 00/30] DRM: fix struct sg_table nents vs. orig_nents misuse
>
> on various lists including the iommu one.

Got it. Thank you!

Best regards,
baolu


Re: [PATCH RESEND v9 07/13] iommu/arm-smmu-v3: Move definitions to a header

2020-09-08 Thread Auger Eric
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Allow sharing structure definitions with the upcoming SVA support for
> Arm SMMUv3, by moving them to a separate header. We could surgically
> extract only what is needed but keeping all definitions in one place
> looks nicer.
> 
> Signed-off-by: Jean-Philippe Brucker 
> ---
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 675 
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 660 +--
>  2 files changed, 676 insertions(+), 659 deletions(-)
>  create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> 
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> new file mode 100644
> index ..51a9ce07b2d6
> --- /dev/null
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -0,0 +1,675 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * IOMMU API for ARM architected SMMUv3 implementations.
> + *
> + * Copyright (C) 2015 ARM Limited
> + */
> +
> +#ifndef _ARM_SMMU_V3_H
> +#define _ARM_SMMU_V3_H
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +/* MMIO registers */
> +#define ARM_SMMU_IDR00x0
> +#define IDR0_ST_LVL  GENMASK(28, 27)
> +#define IDR0_ST_LVL_2LVL 1
> +#define IDR0_STALL_MODEL GENMASK(25, 24)
> +#define IDR0_STALL_MODEL_STALL   0
> +#define IDR0_STALL_MODEL_FORCE   2
> +#define IDR0_TTENDIANGENMASK(22, 21)
> +#define IDR0_TTENDIAN_MIXED  0
> +#define IDR0_TTENDIAN_LE 2
> +#define IDR0_TTENDIAN_BE 3
> +#define IDR0_CD2L(1 << 19)
> +#define IDR0_VMID16  (1 << 18)
> +#define IDR0_PRI (1 << 16)
> +#define IDR0_SEV (1 << 14)
> +#define IDR0_MSI (1 << 13)
> +#define IDR0_ASID16  (1 << 12)
> +#define IDR0_ATS (1 << 10)
> +#define IDR0_HYP (1 << 9)
> +#define IDR0_COHACC  (1 << 4)
> +#define IDR0_TTF GENMASK(3, 2)
> +#define IDR0_TTF_AARCH64 2
> +#define IDR0_TTF_AARCH32_64  3
> +#define IDR0_S1P (1 << 1)
> +#define IDR0_S2P (1 << 0)
> +
> +#define ARM_SMMU_IDR10x4
> +#define IDR1_TABLES_PRESET   (1 << 30)
> +#define IDR1_QUEUES_PRESET   (1 << 29)
> +#define IDR1_REL (1 << 28)
> +#define IDR1_CMDQS   GENMASK(25, 21)
> +#define IDR1_EVTQS   GENMASK(20, 16)
> +#define IDR1_PRIQS   GENMASK(15, 11)
> +#define IDR1_SSIDSIZEGENMASK(10, 6)
> +#define IDR1_SIDSIZE GENMASK(5, 0)
> +
> +#define ARM_SMMU_IDR30xc
> +#define IDR3_RIL (1 << 10)
> +
> +#define ARM_SMMU_IDR50x14
> +#define IDR5_STALL_MAX   GENMASK(31, 16)
> +#define IDR5_GRAN64K (1 << 6)
> +#define IDR5_GRAN16K (1 << 5)
> +#define IDR5_GRAN4K  (1 << 4)
> +#define IDR5_OAS GENMASK(2, 0)
> +#define IDR5_OAS_32_BIT  0
> +#define IDR5_OAS_36_BIT  1
> +#define IDR5_OAS_40_BIT  2
> +#define IDR5_OAS_42_BIT  3
> +#define IDR5_OAS_44_BIT  4
> +#define IDR5_OAS_48_BIT  5
> +#define IDR5_OAS_52_BIT  6
> +#define IDR5_VAX GENMASK(11, 10)
> +#define IDR5_VAX_52_BIT  1
> +
> +#define ARM_SMMU_CR0 0x20
> +#define CR0_ATSCHK   (1 << 4)
> +#define CR0_CMDQEN   (1 << 3)
> +#define CR0_EVTQEN   (1 << 2)
> +#define CR0_PRIQEN   (1 << 1)
> +#define CR0_SMMUEN   (1 << 0)
> +
> +#define ARM_SMMU_CR0ACK  0x24
> +
> +#define ARM_SMMU_CR1 0x28
> +#define CR1_TABLE_SH GENMASK(11, 10)
> +#define CR1_TABLE_OC GENMASK(9, 8)
> +#define CR1_TABLE_IC GENMASK(7, 6)
> +#define CR1_QUEUE_SH GENMASK(5, 4)
> +#define CR1_QUEUE_OC GENMASK(3, 2)
> +#define CR1_QUEUE_IC GENMASK(1, 0)
> +/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
> +#define CR1_CACHE_NC 0
> +#define CR1_CACHE_WB 1
> +#define CR1_CACHE_WT 2
> +
> +#define ARM_SMMU_CR2 0x2c
> +#define CR2_PTM  (1 << 2)
> +#define CR2_RECINVSID(1 << 1)
> +#define CR2_E2H  (1 << 0)
> +
> +#define ARM_SMMU_GBPA0x44
> +#define GBPA_UPDATE  (1 << 31)
> +#d
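
As an aside, GENMASK()-style register layouts like the ones above are
consumed with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. A small
illustrative sketch, assuming the driver's usual smmu->base MMIO mapping:

	u32 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* Extract the 2-bit translation table format field from IDR0. */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH64:
		/* AArch64 tables only */
		break;
	case IDR0_TTF_AARCH32_64:
		/* AArch32 and AArch64 tables */
		break;
	}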

Re: [PATCH RESEND v9 10/13] iommu/arm-smmu-v3: Check for SVA features

2020-09-08 Thread Auger Eric
Hi Jean,
On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Aggregate all sanity-checks for sharing CPU page tables with the SMMU
> under a single ARM_SMMU_FEAT_SVA bit. For PCIe SVA, users also need to
> check FEAT_ATS and FEAT_PRI. For platform SVA, they will have to check
> FEAT_STALLS.
> 
> Introduce ARM_SMMU_FEAT_BTM (Broadcast TLB Maintenance), but don't
> enable it at the moment. Since the entire VMID space is shared with the
> CPU, enabling DVM (by clearing SMMU_CR2.PTM) could result in
> over-invalidation and affect performance of stage-2 mappings.
In which series do you plan to enable it?
> 
> Cc: Suzuki K Poulose 
> Signed-off-by: Jean-Philippe Brucker 
> ---
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   | 10 +
>  .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c   | 43 +++
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   |  3 ++
>  3 files changed, 56 insertions(+)
> 
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> index 90c08f156b43..7b14b48a26c7 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -602,6 +602,8 @@ struct arm_smmu_device {
>  #define ARM_SMMU_FEAT_STALL_FORCE(1 << 13)
>  #define ARM_SMMU_FEAT_VAX(1 << 14)
>  #define ARM_SMMU_FEAT_RANGE_INV  (1 << 15)
> +#define ARM_SMMU_FEAT_BTM(1 << 16)
> +#define ARM_SMMU_FEAT_SVA(1 << 17)
>   u32 features;
>  
>  #define ARM_SMMU_OPT_SKIP_PREFETCH   (1 << 0)
> @@ -683,4 +685,12 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain 
> *smmu_domain, int ssid,
>  void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
>  bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
>  
> +#ifdef CONFIG_ARM_SMMU_V3_SVA
> +bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
> +#else /* CONFIG_ARM_SMMU_V3_SVA */
> +static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
> +{
> + return false;
> +}
> +#endif /* CONFIG_ARM_SMMU_V3_SVA */
>  #endif /* _ARM_SMMU_V3_H */
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> index e919ce894dd1..bf81d91ce71e 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> @@ -153,3 +153,46 @@ static void arm_smmu_free_shared_cd(struct 
> arm_smmu_ctx_desc *cd)
>   kfree(cd);
>   }
>  }
> +
> +bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
> +{
> + unsigned long reg, fld;
> + unsigned long oas;
> + unsigned long asid_bits;
> +
> + u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
> +
> + if ((smmu->features & feat_mask) != feat_mask)
> + return false;
> +
> + if (!(smmu->pgsize_bitmap & PAGE_SIZE))
> + return false;
If we were to check VA_BITS versus SMMU capabilities I guess this would
be here?
> +
> + /*
> +  * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
> +  * not even pretending to support AArch32 here. Abort if the MMU outputs
> +  * addresses larger than what we support.
> +  */
> + reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
> + fld = cpuid_feature_extract_unsigned_field(reg, 
> ID_AA64MMFR0_PARANGE_SHIFT);
> + oas = id_aa64mmfr0_parange_to_phys_shift(fld);
> + if (smmu->oas < oas)
> + return false;
> +
> + /* We can support bigger ASIDs than the CPU, but not smaller */
> + fld = cpuid_feature_extract_unsigned_field(reg, 
> ID_AA64MMFR0_ASID_SHIFT);
> + asid_bits = fld ? 16 : 8;
> + if (smmu->asid_bits < asid_bits)
> + return false;
> +
> + /*
> +  * See max_pinned_asids in arch/arm64/mm/context.c. The following is
> +  * generally the maximum number of bindable processes.
> +  */
> + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
Out of curiosity, What is the rationale behind using
arm64_kernel_unmapped_at_el0() versus
IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)?
CPU caps being finalized? Is that why you say "generally" here?
> + asid_bits--;
> + dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
> + num_possible_cpus() - 2);
nit: s/shared/bindable?
> +
> + return true;
> +}
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index 9e755caea525..15cb3d9c1a5d 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -3258,6 +3258,9 @@ static int arm_smmu_device_hw_probe(struct 
> arm_smmu_device *smmu)
>  
>   smmu->ias = max(smmu->ias, smmu->oas);
>  
> + if (arm_smmu_sva_supported(smmu))
> + smmu->features |= ARM_SMMU_FEAT_SVA;
> +
>   dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
>smmu->ias, smmu->

Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Christoph Hellwig
And because I like replying to myself so much, here is a link to the
version with the arm cleanup patch applied.  Unlike the previous two
attempts this has at least survived very basic sanity testing:

http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/dma-ranges.2

Note that we'll still need to sort out the arm/keystone warnings from
the original patch.  Do we have anyone on the CC list who knows that
platform a little better to figure out if the ifdef solution would work?


Re: [PATCH RESEND v9 11/13] iommu/arm-smmu-v3: Add SVA device feature

2020-09-08 Thread Auger Eric
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Implement the IOMMU device feature callbacks to support the SVA feature.
> At the moment dev_has_feat() returns false since I/O Page Faults isn't
> yet implemented.
and because we don't advertise BTM, isn't it?
> 
> Signed-off-by: Jean-Philippe Brucker 
> ---
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   | 26 ++
>  .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c   | 49 
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   | 79 +++
>  3 files changed, 154 insertions(+)
> 
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> index 7b14b48a26c7..ba34914813ff 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -646,6 +646,8 @@ struct arm_smmu_master {
>   u32 *sids;
>   unsigned intnum_sids;
>   boolats_enabled;
> + boolsva_enabled;
> + struct list_headbonds;
>   unsigned intssid_bits;
>  };
>  
> @@ -687,10 +689,34 @@ bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
>  
>  #ifdef CONFIG_ARM_SMMU_V3_SVA
>  bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
> +bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
> +bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
> +int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
> +int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
>  #else /* CONFIG_ARM_SMMU_V3_SVA */
>  static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
>  {
>   return false;
>  }
> +
> +static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master 
> *master)
> +{
> + return false;
> +}
> +
> +static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master 
> *master)
> +{
> + return false;
> +}
> +
> +static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
> +{
> + return -ENODEV;
> +}
> +
> +static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
> +{
> + return -ENODEV;
> +}
>  #endif /* CONFIG_ARM_SMMU_V3_SVA */
>  #endif /* _ARM_SMMU_V3_H */
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> index bf81d91ce71e..28027620cf2e 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
> @@ -10,6 +10,8 @@
>  #include "arm-smmu-v3.h"
>  #include "../../io-pgtable-arm.h"
>  
> +static DEFINE_MUTEX(sva_lock);
> +
>  /*
>   * Try to reserve this ASID in the SMMU. If it is in use, try to steal it 
> from
>   * the private entry. Careful here, we may be modifying the context tables of
> @@ -196,3 +198,50 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
>  
>   return true;
>  }
> +
> +static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
> +{
> + return false;
> +}
> +
> +bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
> +{
> + if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
> + return false;
> +
> + /* SSID and IOPF support are mandatory for the moment */
> + return master->ssid_bits && arm_smmu_iopf_supported(master);
> +}
> +
> +bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
> +{
> + bool enabled;
> +
> + mutex_lock(&sva_lock);
> + enabled = master->sva_enabled;
> + mutex_unlock(&sva_lock);
> + return enabled;
> +}
> +
> +int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
> +{
> + mutex_lock(&sva_lock);
> + master->sva_enabled = true;
> + mutex_unlock(&sva_lock);
> +
> + return 0;
> +}
> +
> +int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
> +{
> + mutex_lock(&sva_lock);
> + if (!list_empty(&master->bonds)) {
> + dev_err(master->dev, "cannot disable SVA, device is bound\n");
> + mutex_unlock(&sva_lock);
> + return -EBUSY;
> + }
> + master->sva_enabled = false;
> + mutex_unlock(&sva_lock);
> +
> + return 0;
> +}
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index 15cb3d9c1a5d..5ed5bb42298f 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -2163,6 +2163,16 @@ static int arm_smmu_attach_dev(struct iommu_domain 
> *domain, struct device *dev)
>   master = dev_iommu_priv_get(dev);
>   smmu = master->smmu;
>  
> + /*
> +  * Checking that SVA is disabled ensures that this device isn't bound to
> +  * any mm, and can be safely detached from its old domain. Bonds cannot
> +  * be removed concurrently since we're holding the group mutex.
> +  */
> + if (arm_smmu_master_sva_enable

Re: [RFC v2 4/5] dt-bindings: of: Add plumbing for restricted DMA pool

2020-09-08 Thread Claire Chang
On Tue, Aug 25, 2020 at 1:30 AM Tomasz Figa  wrote:
>
> On Tue, Aug 11, 2020 at 11:15 AM Tomasz Figa  wrote:
> >
> > On Mon, Aug 3, 2020 at 5:15 PM Tomasz Figa  wrote:
> > >
> > > Hi Claire and Rob,
> > >
> > > On Mon, Aug 3, 2020 at 4:26 PM Claire Chang  wrote:
> > > >
> > > > On Sat, Aug 1, 2020 at 4:58 AM Rob Herring  wrote:
> > > > >
> > > > > On Tue, Jul 28, 2020 at 01:01:39PM +0800, Claire Chang wrote:
> > > > > > Introduce the new compatible string, device-swiotlb-pool, for 
> > > > > > restricted
> > > > > > DMA. One can specify the address and length of the device swiotlb 
> > > > > > memory
> > > > > > region by device-swiotlb-pool in the device tree.
> > > > > >
> > > > > > Signed-off-by: Claire Chang 
> > > > > > ---
> > > > > >  .../reserved-memory/reserved-memory.txt   | 35 +++
> > > > > >  1 file changed, 35 insertions(+)
> > > > > >
> > > > > > diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
> > > > > > index 4dd20de6977f..78850896e1d0 100644
> > > > > > --- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
> > > > > > +++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
> > > > > > @@ -51,6 +51,24 @@ compatible (optional) - standard definition
> > > > > >used as a shared pool of DMA buffers for a set of 
> > > > > > devices. It can
> > > > > >be used by an operating system to instantiate the 
> > > > > > necessary pool
> > > > > >management subsystem if necessary.
> > > > > > +- device-swiotlb-pool: This indicates a region of memory 
> > > > > > meant to be
> > > > >
> > > > > swiotlb is a Linux thing. The binding should be independent.
> > > > Got it. Thanks for pointing this out.
> > > >
> > > > >
> > > > > > +  used as a pool of device swiotlb buffers for a given 
> > > > > > device. When
> > > > > > +  using this, the no-map and reusable properties must not 
> > > > > > be set, so the
> > > > > > +  operating system can create a virtual mapping that will 
> > > > > > be used for
> > > > > > +  synchronization. Also, there must be a restricted-dma 
> > > > > > property in the
> > > > > > +  device node to specify the indexes of reserved-memory 
> > > > > > nodes. One can
> > > > > > +  specify two reserved-memory nodes in the device tree. 
> > > > > > One with
> > > > > > +  shared-dma-pool to handle the coherent DMA buffer 
> > > > > > allocation, and
> > > > > > +  another one with device-swiotlb-pool for regular DMA 
> > > > > > to/from system
> > > > > > +  memory, which would be subject to bouncing. The main 
> > > > > > purpose for
> > > > > > +  restricted DMA is to mitigate the lack of DMA access 
> > > > > > control on
> > > > > > +  systems without an IOMMU, which could result in the DMA 
> > > > > > accessing the
> > > > > > +  system memory at unexpected times and/or unexpected 
> > > > > > addresses,
> > > > > > +  possibly leading to data leakage or corruption. The 
> > > > > > feature on its own
> > > > > > +  provides a basic level of protection against the DMA 
> > > > > > overwriting buffer
> > > > > > +  contents at unexpected times. However, to protect 
> > > > > > against general data
> > > > > > +  leakage and system memory corruption, the system needs 
> > > > > > to provide a
> > > > > > +  way to restrict the DMA to a predefined memory region.
> > > > >
> > > > > I'm pretty sure we already support per device carveouts and I don't
> > > > > understand how this is different.
> > > > We use this to bounce streaming DMA in and out of a specially allocated 
> > > > region.
> > > > I'll try to merge this with the existing one (i.e., shared-dma-pool)
> > > > to see if that
> > > > makes things clearer.
> > > >
> > >
> > > Indeed, from the firmware point of view, this is just a carveout, for
> > > which we have the "shared-dma-pool" compatible string defined already.
> > >
> > > However, depending on the device and firmware setup, the way the
> > > carevout is used may change. I can see the following scenarios:
> > >
> > > 1) coherent DMA (dma_alloc_*) within a reserved pool and no
> > > non-coherent DMA (dma_map_*).
> > >
> > > This is how the "memory-region" property is handled today in Linux for
> > > devices which can only DMA from/to the given memory region. However,
> > > I'm not sure if no non-coherent DMA is actually enforced in any way by
> > > the DMA subsystem.
> > >
> > > 2) coherent DMA from a reserved pool and non-coherent DMA from system 
> > > memory
> > >
> > > This is the case for the systems which have some dedicated part of
> > > memory which is guaranteed to be coherent with the DMA, but still can
> > > do non-coherent DMA to a

Re: [PATCH RESEND v9 02/13] iommu/ioasid: Add ioasid references

2020-09-08 Thread Auger Eric
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Let IOASID users take references to existing ioasids with ioasid_get().
> ioasid_put() drops a reference and only frees the ioasid when its
> reference number is zero. It returns true if the ioasid was freed.
> For drivers that don't call ioasid_get(), ioasid_put() is the same as
> ioasid_free().
> 
> Reviewed-by: Lu Baolu 
> Signed-off-by: Jean-Philippe Brucker 
Reviewed-by: Eric Auger 

Thanks

Eric

> ---
>  include/linux/ioasid.h  | 10 --
>  drivers/iommu/intel/iommu.c |  4 ++--
>  drivers/iommu/intel/svm.c   |  6 +++---
>  drivers/iommu/ioasid.c  | 38 +
>  4 files changed, 47 insertions(+), 11 deletions(-)
> 
> diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
> index 6f000d7a0ddc..e9dacd4b9f6b 100644
> --- a/include/linux/ioasid.h
> +++ b/include/linux/ioasid.h
> @@ -34,7 +34,8 @@ struct ioasid_allocator_ops {
>  #if IS_ENABLED(CONFIG_IOASID)
>  ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
> void *private);
> -void ioasid_free(ioasid_t ioasid);
> +void ioasid_get(ioasid_t ioasid);
> +bool ioasid_put(ioasid_t ioasid);
>  void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
> bool (*getter)(void *));
>  int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
> @@ -48,10 +49,15 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set 
> *set, ioasid_t min,
>   return INVALID_IOASID;
>  }
>  
> -static inline void ioasid_free(ioasid_t ioasid)
> +static inline void ioasid_get(ioasid_t ioasid)
>  {
>  }
>  
> +static inline bool ioasid_put(ioasid_t ioasid)
> +{
> + return false;
> +}
> +
>  static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
>   bool (*getter)(void *))
>  {
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index e9864e52b0e9..152fc2dc17e0 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -5149,7 +5149,7 @@ static void auxiliary_unlink_device(struct dmar_domain 
> *domain,
>   domain->auxd_refcnt--;
>  
>   if (!domain->auxd_refcnt && domain->default_pasid > 0)
> - ioasid_free(domain->default_pasid);
> + ioasid_put(domain->default_pasid);
>  }
>  
>  static int aux_domain_add_dev(struct dmar_domain *domain,
> @@ -5210,7 +5210,7 @@ static int aux_domain_add_dev(struct dmar_domain 
> *domain,
>   spin_unlock(&iommu->lock);
>   spin_unlock_irqrestore(&device_domain_lock, flags);
>   if (!domain->auxd_refcnt && domain->default_pasid > 0)
> - ioasid_free(domain->default_pasid);
> + ioasid_put(domain->default_pasid);
>  
>   return ret;
>  }
> diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
> index 95c3164a2302..50897a2bd1da 100644
> --- a/drivers/iommu/intel/svm.c
> +++ b/drivers/iommu/intel/svm.c
> @@ -565,7 +565,7 @@ intel_svm_bind_mm(struct device *dev, int flags, struct 
> svm_dev_ops *ops,
>   if (mm) {
>   ret = mmu_notifier_register(&svm->notifier, mm);
>   if (ret) {
> - ioasid_free(svm->pasid);
> + ioasid_put(svm->pasid);
>   kfree(svm);
>   kfree(sdev);
>   goto out;
> @@ -583,7 +583,7 @@ intel_svm_bind_mm(struct device *dev, int flags, struct 
> svm_dev_ops *ops,
>   if (ret) {
>   if (mm)
>   mmu_notifier_unregister(&svm->notifier, mm);
> - ioasid_free(svm->pasid);
> + ioasid_put(svm->pasid);
>   kfree(svm);
>   kfree(sdev);
>   goto out;
> @@ -652,7 +652,7 @@ static int intel_svm_unbind_mm(struct device *dev, int 
> pasid)
>   kfree_rcu(sdev, rcu);
>  
>   if (list_empty(&svm->devs)) {
> - ioasid_free(svm->pasid);
> + ioasid_put(svm->pasid);
>   if (svm->mm)
>   mmu_notifier_unregister(&svm->notifier, 
> svm->mm);
>   list_del(&svm->list);
> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> index 0f8dd377aada..50ee27bbd04e 100644
> --- a/drivers/iommu/ioasid.c
> +++ b/drivers/iommu/ioasid.c
> @@ -2,7 +2,7 @@
>  /*
>   * I/O Address Space ID allocator. There is one global IOASID space, split 
> into
>   * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
> - * free IOASIDs with ioasid_alloc and ioasid_free.
> + * free IOASIDs with ioasid_alloc and ioasid_put.
>   */
>  #include 
>  #include 
> @@ -15,6 +15,7 @@ struct ioasid_data {
>   struct ioasid_set *set;
>   void *private;
>   struct rcu_head

Re: [PATCH v11 00/11] PCI: brcmstb: enable PCIe for STB chips

2020-09-08 Thread Lorenzo Pieralisi
On Mon, Sep 07, 2020 at 11:29:06AM -0700, Florian Fainelli wrote:
> 
> 
> On 9/7/2020 10:43 AM, Jim Quinlan wrote:
> > On Mon, Sep 7, 2020 at 5:16 AM Lorenzo Pieralisi
> >  wrote:
> > > 
> > > On Thu, Aug 27, 2020 at 09:29:59AM -0400, Jim Quinlan wrote:
> > > > On Thu, Aug 27, 2020 at 2:35 AM Christoph Hellwig  wrote:
> > > > > 
> > > > > On Tue, Aug 25, 2020 at 10:40:27AM -0700, Florian Fainelli wrote:
> > > > > > Hi,
> > > > > > 
> > > > > > On 8/24/2020 12:30 PM, Jim Quinlan wrote:
> > > > > > > 
> > > > > > > Patchset Summary:
> > > > > > > Enhance a PCIe host controller driver.  Because of its 
> > > > > > > unusual design
> > > > > > > we are foced to change dev->dma_pfn_offset into a more 
> > > > > > > general role
> > > > > > > allowing multiple offsets.  See the 'v1' notes below for more 
> > > > > > > info.
> > > > > > 
> > > > > > We are version 11 and counting, and it is not clear to me whether 
> > > > > > there is
> > > > > > any chance of getting these patches reviewed and hopefully merged 
> > > > > > for the
> > > > > > 5.10 merge window.
> > > > > > 
> > > > > > There are a lot of different files being touched, so what would be 
> > > > > > the
> > > > > > ideal way of routing those changes towards inclusion?
> > > > > 
> > > > > FYI, I offered to take the dma-mapping bits through the dma-mapping 
> > > > > tree.
> > > > > I have a bit of a backlog, but plan to review and if Jim is ok with 
> > > > > that
> > > > > apply the current version.
> > > > Sounds good to me.
> > > 
> > > Hi Jim,
> > > 
> > > is the dependency now solved ? Should we review/take this series as
> > > is for v5.10 through the PCI tree ?
> > Hello Lorenzo,
> > 
> > We are still working out a regression with the DMA offset commit on
> > the RaspberryPi.  Nicolas has found the root cause and we are now
> > devising a solution.
> 
> Maybe we can parallelize the PCIe driver review while the DMA changes
> are being worked on in Christoph's branch. Lorenzo, are you fine with
> the PCIe changes proper?

I will have a look - the main contentious point was about the DMA
changes - if Christoph is happy with them I am OK with them
too - I hope there is not anything controversial in the host
bridge driver itself but I will look into it.

Lorenzo


Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Nicolas Saenz Julienne
On Tue, 2020-09-08 at 11:43 +0200, Christoph Hellwig wrote:
> And because I like replying to myself so much, here is a link to the
> version with the arm cleanup patch applied.  Unlike the previous two
> attempts this has at least survived very basic sanity testing:
> 
> http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/dma-ranges.2
> 
> Note that we'll still need to sort out the arm/keystone warnings from
> the original patch.  Do we have anyone on the CC list who knows that
> platform a little better to figure out if the ifdef solution would work?

Had to do the following to boot without errors:

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index ef61a33c47bc..7dd88a0b6d0b 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -97,6 +97,9 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 {
dma_addr_t end = addr + size - 1;
 
+   if (addr == DMA_MAPPING_ERROR)
+   return false;
+
if (!dev->dma_mask)
return false;
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 90f1ecb6baaf..25809703a5bf 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -71,7 +71,12 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-   return phys_to_dma_direct(dev, phys) + size - 1 <=
+   dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
+
+   if (dma_addr == DMA_MAPPING_ERROR)
+   return false;
+
+   return dma_addr + size - 1 <=
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
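
The early DMA_MAPPING_ERROR checks matter because the sentinel is
~(dma_addr_t)0, so the end-of-range arithmetic silently wraps without
them. A worked example (a sketch, assuming a 64-bit dma_addr_t):

	addr = DMA_MAPPING_ERROR;	/* 0xffffffffffffffff */
	end  = addr + size - 1;		/* wraps around to size - 2 */
	/* end <= *dev->dma_mask now holds for any mask, so the bogus
	 * address would wrongly be accepted as DMA-capable. */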


Regards,
Nicolas




Re: [PATCH v11 00/11] PCI: brcmstb: enable PCIe for STB chips

2020-09-08 Thread Christoph Hellwig
On Tue, Sep 08, 2020 at 11:42:26AM +0100, Lorenzo Pieralisi wrote:
> > Maybe we can parallelize the PCIe driver review while the DMA changes
> > are being worked on in Christoph's branch. Lorenzo, are you fine with
> > the PCIe changes proper?
> 
> I will have a look - the main contentious point was about the DMA
> changes - if Christoph is happy with them I am OK with them
> too - I hope there is not anything controversial in the host
> bridge driver itself but I will look into it.

I'm pretty happy with the overall shape.  Now we just need to squeeze
out the regressions..


Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Christoph Hellwig
On Tue, Sep 08, 2020 at 01:20:56PM +0200, Nicolas Saenz Julienne wrote:
> On Tue, 2020-09-08 at 11:43 +0200, Christoph Hellwig wrote:
> > And because I like replying to myself so much, here is a link to the
> > version with the arm cleanup patch applied.  Unlike the previous two
> > attempts this has at least survived very basic sanity testing:
> > 
> > http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/dma-ranges.2
> > 
> > Note that we'll still need to sort out the arm/keystone warnings from
> > the original patch.  Do we have anyone on the CC list who knows that
> > platform a little better to figure out if the ifdef solution would work?
> 
> Had to do the following to boot without errors:

I've folded it in.  That being said the whole RPi4 setup confuses the
heck out of me.  I wonder what thing was smoked to come up with it..


Re: [patch V2 14/46] x86/ioapic: Consolidate IOAPIC allocation

2020-09-08 Thread Wei Liu
On Wed, Aug 26, 2020 at 01:16:42PM +0200, Thomas Gleixner wrote:
...
> --- a/drivers/iommu/hyperv-iommu.c
> +++ b/drivers/iommu/hyperv-iommu.c
> @@ -101,7 +101,7 @@ static int hyperv_irq_remapping_alloc(st
>* in the chip_data and hyperv_irq_remapping_activate()/hyperv_ir_set_
>* affinity() set vector and dest_apicid directly into IO-APIC entry.
>*/
> - irq_data->chip_data = info->ioapic_entry;
> + irq_data->chip_data = info->ioapic.entry;

Not sure if it is required for such a trivial change but here you go:

Acked-by: Wei Liu 


Re: [patch V2 18/46] x86/msi: Consolidate MSI allocation

2020-09-08 Thread Wei Liu
On Wed, Aug 26, 2020 at 01:16:46PM +0200, Thomas Gleixner wrote:
[...]
> --- a/drivers/pci/controller/pci-hyperv.c
> +++ b/drivers/pci/controller/pci-hyperv.c
> @@ -1534,7 +1534,7 @@ static struct irq_chip hv_msi_irq_chip =
>  static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info 
> *info,
>  msi_alloc_info_t *arg)
>  {
> - return arg->msi_hwirq;
> + return arg->hwirq;
>  }

Acked-by: Wei Liu 


Re: [Intel-gfx] [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api

2020-09-08 Thread Tvrtko Ursulin



Hi,

On 27/08/2020 22:36, Logan Gunthorpe wrote:

On 2020-08-23 6:04 p.m., Tom Murphy wrote:

I have added a check for the sg_dma_len == 0 :
"""
  } __sgt_iter(struct scatterlist *sgl, bool dma) {
 struct sgt_iter s = { .sgp = sgl };

+   if (sgl && sg_dma_len(sgl) == 0)
+   s.sgp = NULL;

 if (s.sgp) {
 .
"""
at location [1].
but it doens't fix the problem.


Based on my read of the code, it looks like we also need to change usage
of sgl->length... Something like the rough patch below, maybe?


This thread was brought to my attention and I initially missed this 
reply. Essentially I came to the same conclusion about the need to use 
sg_dma_len. One small correction below:



Also, Tom, do you have an updated version of the patchset to convert the
Intel IOMMU to dma-iommu available? The last one I've found doesn't
apply cleanly (I'm assuming parts of it have been merged in slightly
modified forms).

Thanks,

Logan

--

diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b7b59328cb76..9367ac801f0c 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -27,13 +27,19 @@ static __always_inline struct sgt_iter {
  } __sgt_iter(struct scatterlist *sgl, bool dma) {
 struct sgt_iter s = { .sgp = sgl };

+   if (sgl && !sg_dma_len(s.sgp))


I'd extend the condition to be, just to be safe:

if (dma && sgl && !sg_dma_len(s.sgp))


+   s.sgp = NULL;
+
 if (s.sgp) {
 s.max = s.curr = s.sgp->offset;
-   s.max += s.sgp->length;
-   if (dma)
+
+   if (dma) {
+   s.max += sg_dma_len(s.sgp);
 s.dma = sg_dma_address(s.sgp);
-   else
+   } else {
+   s.max += s.sgp->length;
 s.pfn = page_to_pfn(sg_page(s.sgp));
+   }


Otherwise has this been tested or alternatively how to test it? (How to 
repro the issue.)


Regards,

Tvrtko


Re: [Intel-gfx] [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api

2020-09-08 Thread Logan Gunthorpe


On 2020-09-08 9:28 a.m., Tvrtko Ursulin wrote:
>>
>> diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
>> index b7b59328cb76..9367ac801f0c 100644
>> --- a/drivers/gpu/drm/i915/i915_scatterlist.h
>> +++ b/drivers/gpu/drm/i915/i915_scatterlist.h
>> @@ -27,13 +27,19 @@ static __always_inline struct sgt_iter {
>>   } __sgt_iter(struct scatterlist *sgl, bool dma) {
>>  struct sgt_iter s = { .sgp = sgl };
>>
>> +   if (sgl && !sg_dma_len(s.sgp))
> 
> I'd extend the condition to be, just to be safe:
> if (dma && sgl && !sg_dma_len(s.sgp))
>

Right, good catch, that's definitely necessary.

>> +   s.sgp = NULL;
>> +
>>  if (s.sgp) {
>>  s.max = s.curr = s.sgp->offset;
>> -   s.max += s.sgp->length;
>> -   if (dma)
>> +
>> +   if (dma) {
>> +   s.max += sg_dma_len(s.sgp);
>>  s.dma = sg_dma_address(s.sgp);
>> -   else
>> +   } else {
>> +   s.max += s.sgp->length;
>>  s.pfn = page_to_pfn(sg_page(s.sgp));
>> +   }
> 
> Otherwise has this been tested or alternatively how to test it? (How to
> repro the issue.)

It has not been tested. To test it, you need Tom's patch set without the
last "DO NOT MERGE" patch:

https://lkml.kernel.org/lkml/20200907070035.ga25...@infradead.org/T/

Thanks,

Logan

Re: [Intel-gfx] [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api

2020-09-08 Thread Tvrtko Ursulin


On 08/09/2020 16:44, Logan Gunthorpe wrote:

On 2020-09-08 9:28 a.m., Tvrtko Ursulin wrote:


diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b7b59328cb76..9367ac801f0c 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -27,13 +27,19 @@ static __always_inline struct sgt_iter {
   } __sgt_iter(struct scatterlist *sgl, bool dma) {
  struct sgt_iter s = { .sgp = sgl };

+   if (sgl && !sg_dma_len(s.sgp))


I'd extend the condition to be, just to be safe:
 if (dma && sgl && !sg_dma_len(s.sgp))



Right, good catch, that's definitely necessary.


+   s.sgp = NULL;
+
  if (s.sgp) {
  s.max = s.curr = s.sgp->offset;
-   s.max += s.sgp->length;
-   if (dma)
+
+   if (dma) {
+   s.max += sg_dma_len(s.sgp);
  s.dma = sg_dma_address(s.sgp);
-   else
+   } else {
+   s.max += s.sgp->length;
  s.pfn = page_to_pfn(sg_page(s.sgp));
+   }


Otherwise has this been tested or alternatively how to test it? (How to
repro the issue.)


It has not been tested. To test it, you need Tom's patch set without the
last "DO NOT MERGE" patch:

https://lkml.kernel.org/lkml/20200907070035.ga25...@infradead.org/T/


Tom, do you have a branch somewhere I could pull from? (Just being lazy 
about downloading a bunch of messages from the archives.)


What GPU is in your Lenovo X1 Carbon 5th generation, and what graphical/desktop setup do I need to repro?


Regards,

Tvrtko
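
For reference, folding Tvrtko's correction into Logan's rough patch gives
roughly the following untested sketch of the i915 __sgt_iter() helper
(not a final patch):

	} __sgt_iter(struct scatterlist *sgl, bool dma) {
		struct sgt_iter s = { .sgp = sgl };

		/* In DMA mode, an entry with sg_dma_len() == 0 marks the
		 * end of the mapped list, so stop iterating there. */
		if (dma && sgl && !sg_dma_len(s.sgp))
			s.sgp = NULL;

		if (s.sgp) {
			s.max = s.curr = s.sgp->offset;
			if (dma) {
				s.max += sg_dma_len(s.sgp);
				s.dma = sg_dma_address(s.sgp);
			} else {
				s.max += s.sgp->length;
				s.pfn = page_to_pfn(sg_page(s.sgp));
			}
		}

		return s;
	}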

Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Jim Quinlan via iommu
On Tue, Sep 8, 2020 at 5:43 AM Christoph Hellwig  wrote:
>
> And because I like replying to myself so much, here is a link to the
> version with the arm cleanup patch applied.  Unlike the previous two
> attempts this has at least survived very basic sanity testing:
>
> http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/dma-ranges.2
>
Tested-by: Jim Quinlan 

> Note that we'll still need to sort out the arm/keystone warnings from
> the original patch.  Do we have anyone on the CC list who knows that
> platform a little better to figure out if the ifdef solution would work?


[PATCH 1/2] dma-mapping: remove the dma_dummy_ops export

2020-09-08 Thread Christoph Hellwig
dma_dummy_ops is only used by the ACPI code, which can't be modular.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/dummy.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
index 05607642c888d9..6974b1bd7d0b88 100644
--- a/kernel/dma/dummy.c
+++ b/kernel/dma/dummy.c
@@ -36,4 +36,3 @@ const struct dma_map_ops dma_dummy_ops = {
.map_sg = dma_dummy_map_sg,
.dma_supported  = dma_dummy_supported,
 };
-EXPORT_SYMBOL(dma_dummy_ops);
-- 
2.28.0



drop a few unused DMA exports

2020-09-08 Thread Christoph Hellwig
Hi all,

this series removes a bunch of (now) unused exports in the DMA mapping
subsystem.


[PATCH 2/2] dma-debug: remove all exports

2020-09-08 Thread Christoph Hellwig
Now that the main dma mapping entry points are out of line, none
of these functions can be called from modular code.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/debug.c | 12 
 1 file changed, 12 deletions(-)

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 8e9f7b301c6d39..9e5370e3700c08 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1199,7 +1199,6 @@ void debug_dma_map_single(struct device *dev, const void 
*addr,
err_printk(dev, NULL, "device driver maps memory from vmalloc 
area [addr=%p] [len=%lu]\n",
   addr, len);
 }
-EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
size_t size, int direction, dma_addr_t dma_addr)
@@ -1235,7 +1234,6 @@ void debug_dma_map_page(struct device *dev, struct page 
*page, size_t offset,
 
add_dma_entry(entry);
 }
-EXPORT_SYMBOL(debug_dma_map_page);
 
 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -1273,7 +1271,6 @@ void debug_dma_mapping_error(struct device *dev, 
dma_addr_t dma_addr)
 
put_hash_bucket(bucket, flags);
 }
-EXPORT_SYMBOL(debug_dma_mapping_error);
 
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
  size_t size, int direction)
@@ -1290,7 +1287,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t 
addr,
return;
check_unmap(&ref);
 }
-EXPORT_SYMBOL(debug_dma_unmap_page);
 
 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
  int nents, int mapped_ents, int direction)
@@ -1328,7 +1324,6 @@ void debug_dma_map_sg(struct device *dev, struct 
scatterlist *sg,
add_dma_entry(entry);
}
 }
-EXPORT_SYMBOL(debug_dma_map_sg);
 
 static int get_nr_mapped_entries(struct device *dev,
 struct dma_debug_entry *ref)
@@ -1380,7 +1375,6 @@ void debug_dma_unmap_sg(struct device *dev, struct 
scatterlist *sglist,
check_unmap(&ref);
}
 }
-EXPORT_SYMBOL(debug_dma_unmap_sg);
 
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
  dma_addr_t dma_addr, void *virt)
@@ -1466,7 +1460,6 @@ void debug_dma_map_resource(struct device *dev, 
phys_addr_t addr, size_t size,
 
add_dma_entry(entry);
 }
-EXPORT_SYMBOL(debug_dma_map_resource);
 
 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
  size_t size, int direction)
@@ -1484,7 +1477,6 @@ void debug_dma_unmap_resource(struct device *dev, 
dma_addr_t dma_addr,
 
check_unmap(&ref);
 }
-EXPORT_SYMBOL(debug_dma_unmap_resource);
 
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
   size_t size, int direction)
@@ -1503,7 +1495,6 @@ void debug_dma_sync_single_for_cpu(struct device *dev, 
dma_addr_t dma_handle,
 
check_sync(dev, &ref, true);
 }
-EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
 void debug_dma_sync_single_for_device(struct device *dev,
  dma_addr_t dma_handle, size_t size,
@@ -1523,7 +1514,6 @@ void debug_dma_sync_single_for_device(struct device *dev,
 
check_sync(dev, &ref, false);
 }
-EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
   int nelems, int direction)
@@ -1556,7 +1546,6 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct 
scatterlist *sg,
check_sync(dev, &ref, true);
}
 }
-EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
 
 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  int nelems, int direction)
@@ -1588,7 +1577,6 @@ void debug_dma_sync_sg_for_device(struct device *dev, 
struct scatterlist *sg,
check_sync(dev, &ref, false);
}
 }
-EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
 
 static int __init dma_debug_driver_setup(char *str)
 {
-- 
2.28.0



[PATCH 03/12] MIPS/jazzdma: decouple from dma-direct

2020-09-08 Thread Christoph Hellwig
The jazzdma ops implement support for a very basic IOMMU.  Thus we really
should not use the dma-direct code that takes physical address limits
into account.  This survived through the great MIPS DMA ops cleanup mostly
because I was lazy, but now it is time to fully split the implementations.

Signed-off-by: Christoph Hellwig 
Acked-by: Thomas Bogendoerfer 
---
 arch/mips/jazz/jazzdma.c | 32 +---
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index fe40dbed04c1d6..d0b5a2ba2b1a8a 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,7 +16,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -492,26 +491,38 @@ int vdma_get_enable(int channel)
 static void *jazz_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+   struct page *page;
void *ret;
 
-   ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-   if (!ret)
-   return NULL;
+   if (attrs & DMA_ATTR_NO_WARN)
+   gfp |= __GFP_NOWARN;
 
-   *dma_handle = vdma_alloc(virt_to_phys(ret), size);
-   if (*dma_handle == DMA_MAPPING_ERROR) {
-   dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
+   size = PAGE_ALIGN(size);
+   page = alloc_pages(gfp, get_order(size));
+   if (!page)
return NULL;
-   }
+   ret = page_address(page);
+   *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+   if (*dma_handle == DMA_MAPPING_ERROR)
+   goto out_free_pages;
+
+   if (attrs & DMA_ATTR_NON_CONSISTENT)
+   return ret;
+   arch_dma_prep_coherent(page, size);
+   return (void *)(UNCAC_BASE + __pa(ret));
 
-   return ret;
+out_free_pages:
+   __free_pages(page, get_order(size));
+   return NULL;
 }
 
 static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
 {
vdma_free(dma_handle);
-   dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
+   if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+   vaddr = __va(vaddr - UNCAC_BASE);
+   __free_pages(virt_to_page(vaddr), get_order(size));
 }
 
 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
@@ -608,7 +619,6 @@ const struct dma_map_ops jazz_dma_ops = {
.sync_single_for_device = jazz_dma_sync_single_for_device,
.sync_sg_for_cpu= jazz_dma_sync_sg_for_cpu,
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
-   .dma_supported  = dma_direct_supported,
.cache_sync = arch_dma_cache_sync,
.mmap   = dma_common_mmap,
.get_sgtable= dma_common_get_sgtable,
-- 
2.28.0



dma-mapping cleanups

2020-09-08 Thread Christoph Hellwig
Hi all,

this series contains just the cleanup parts from the previous
"a saner API for allocating DMA addressable pages" series.  The
intent is to get this in to reduce the amount of patchbombing
for iterations of the real API work.


[PATCH 07/12] dma-direct: lift gfp_t manipulation out of __dma_direct_alloc_pages

2020-09-08 Thread Christoph Hellwig
Move the detailed gfp_t setup from __dma_direct_alloc_pages into the
caller to clean things up a little.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/direct.c | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 1d564bea58571b..12e9f5f75dfe4b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -108,7 +108,7 @@ static inline bool dma_should_free_from_pool(struct device 
*dev,
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-   gfp_t gfp, unsigned long attrs)
+   gfp_t gfp)
 {
int node = dev_to_node(dev);
struct page *page = NULL;
@@ -116,11 +116,6 @@ static struct page *__dma_direct_alloc_pages(struct device 
*dev, size_t size,
 
WARN_ON_ONCE(!PAGE_ALIGNED(size));
 
-   if (attrs & DMA_ATTR_NO_WARN)
-   gfp |= __GFP_NOWARN;
-
-   /* we always manually zero the memory once we are done: */
-   gfp &= ~__GFP_ZERO;
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
   &phys_limit);
page = dma_alloc_contiguous(dev, size, gfp);
@@ -164,6 +159,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
size = PAGE_ALIGN(size);
+   if (attrs & DMA_ATTR_NO_WARN)
+   gfp |= __GFP_NOWARN;
 
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
u64 phys_mask;
@@ -177,7 +174,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
goto done;
}
 
-   page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
+   /* we always manually zero the memory once we are done */
+   page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
if (!page)
return NULL;
 
-- 
2.28.0



[PATCH 05/12] dma-mapping: add (back) arch_dma_mark_clean for ia64

2020-09-08 Thread Christoph Hellwig
Add back a hook to optimize dcache flushing after reading executable
code using DMA.  This gets ia64 out of the business of pretending to
be dma incoherent just for this optimization.

Signed-off-by: Christoph Hellwig 
---
 arch/ia64/Kconfig   |  3 +--
 arch/ia64/kernel/dma-mapping.c  | 14 +-
 arch/ia64/mm/init.c |  3 +--
 include/linux/dma-direct.h  |  3 +++
 include/linux/dma-noncoherent.h |  8 
 kernel/dma/Kconfig  |  6 ++
 kernel/dma/direct.c |  3 +++
 7 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 5b4ec80bf5863a..513ba0c5d33610 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -8,6 +8,7 @@ menu "Processor type and features"
 
 config IA64
bool
+   select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
@@ -32,8 +33,6 @@ config IA64
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_VIRT_CPU_ACCOUNTING
-   select DMA_NONCOHERENT_MMAP
-   select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 09ef9ce9988d1f..f640ed6fe1d576 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include 
+#include 
 #include 
 
 /* Set this to 1 if there is a HW IOMMU in the system */
@@ -7,15 +7,3 @@ int iommu_detected __read_mostly;
 
 const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-   dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-   return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-   dma_addr_t dma_addr, unsigned long attrs)
-{
-   dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
-}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 0b3fb4c7af2920..02e5aa08294ee0 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -73,8 +73,7 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-   enum dma_data_direction dir)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
unsigned long pfn = PHYS_PFN(paddr);
 
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 6e87225600ae35..95e3e28bd93f47 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -150,6 +150,9 @@ static inline void dma_direct_sync_single_for_cpu(struct 
device *dev,
 
if (unlikely(is_swiotlb_buffer(paddr)))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+
+   if (dir == DMA_FROM_DEVICE)
+   arch_dma_mark_clean(paddr, size);
 }
 
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index ca09a4e07d2d3d..b9bc6c557ea46f 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -108,6 +108,14 @@ static inline void arch_dma_prep_coherent(struct page 
*page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
 void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
 
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index e7b801649f6574..281785feb874db 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -44,6 +44,12 @@ config ARCH_HAS_DMA_SET_MASK
 config ARCH_HAS_DMA_WRITE_COMBINE
bool
 
+#
+# Select if the architectures provides the arch_dma_mark_clean hook
+#
+config ARCH_HAS_DMA_MARK_CLEAN
+   bool
+
 config DMA_DECLARE_COHERENT
bool
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index db6ef07aec3b37..949c1cbf08b2d5 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -345,6 +345,9 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
if (unlikely(is_swiotlb_buffer(paddr)))
swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
SYNC_FOR_CPU);
+
+   if (dir == DMA_FROM_DEVICE)
+   arch_dma_mark_clean(paddr, sg->length);
}
 
if (!dev_is_dma_coherent(dev))
-- 
2.28.0


[PATCH 10/12] dma-direct: rename and cleanup __phys_to_dma

2020-09-08 Thread Christoph Hellwig
The __phys_to_dma vs phys_to_dma distinction isn't exactly obvious.  Try
to improve the situation by renaming __phys_to_dma to
phys_to_dma_unencrypted, and not forcing architectures that want to
override phys_to_dma to actually provide __phys_to_dma.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/include/asm/dma-direct.h  |  2 +-
 arch/mips/bmips/dma.c  |  2 +-
 arch/mips/cavium-octeon/dma-octeon.c   |  2 +-
 arch/mips/include/asm/dma-direct.h |  2 +-
 arch/mips/loongson2ef/fuloong-2e/dma.c |  2 +-
 arch/mips/loongson2ef/lemote-2f/dma.c  |  2 +-
 arch/mips/loongson64/dma.c |  2 +-
 arch/mips/pci/pci-ar2315.c |  2 +-
 arch/mips/pci/pci-xtalk-bridge.c   |  2 +-
 arch/mips/sgi-ip32/ip32-dma.c  |  2 +-
 arch/powerpc/include/asm/dma-direct.h  |  2 +-
 drivers/iommu/intel/iommu.c|  2 +-
 include/linux/dma-direct.h | 28 +++---
 kernel/dma/direct.c|  8 
 kernel/dma/swiotlb.c   |  4 ++--
 15 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/arch/arm/include/asm/dma-direct.h 
b/arch/arm/include/asm/dma-direct.h
index a8cee87a93e8ab..bca0de56753439 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -2,7 +2,7 @@
 #ifndef ASM_ARM_DMA_DIRECT_H
 #define ASM_ARM_DMA_DIRECT_H 1
 
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
unsigned int offset = paddr & ~PAGE_MASK;
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index ba2a5d33dfd3fa..49061b870680b9 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
 
 #define FLUSH_RAC  0x100
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
 {
struct bmips_dma_range *r;
 
diff --git a/arch/mips/cavium-octeon/dma-octeon.c 
b/arch/mips/cavium-octeon/dma-octeon.c
index 388b13ba2558c2..232fa1017b1ec9 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -168,7 +168,7 @@ void __init octeon_pci_dma_init(void)
 }
 #endif /* CONFIG_PCI */
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 #ifdef CONFIG_PCI
if (dev && dev_is_pci(dev))
diff --git a/arch/mips/include/asm/dma-direct.h 
b/arch/mips/include/asm/dma-direct.h
index 8e178651c638c2..9a640118316c9d 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -2,7 +2,7 @@
 #ifndef _MIPS_DMA_DIRECT_H
 #define _MIPS_DMA_DIRECT_H 1
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
 #endif /* _MIPS_DMA_DIRECT_H */
diff --git a/arch/mips/loongson2ef/fuloong-2e/dma.c 
b/arch/mips/loongson2ef/fuloong-2e/dma.c
index 83fadeb3fd7d56..cea167d8aba8db 100644
--- a/arch/mips/loongson2ef/fuloong-2e/dma.c
+++ b/arch/mips/loongson2ef/fuloong-2e/dma.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
return paddr | 0x8000;
 }
diff --git a/arch/mips/loongson2ef/lemote-2f/dma.c 
b/arch/mips/loongson2ef/lemote-2f/dma.c
index 302b43a14eee74..3c9e994563578c 100644
--- a/arch/mips/loongson2ef/lemote-2f/dma.c
+++ b/arch/mips/loongson2ef/lemote-2f/dma.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
return paddr | 0x8000;
 }
diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c
index b3dc5d0bd2b113..364f2f27c8723f 100644
--- a/arch/mips/loongson64/dma.c
+++ b/arch/mips/loongson64/dma.c
@@ -4,7 +4,7 @@
 #include 
 #include 
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
/* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
 * Loongson-3's 48bit address space and embed it into 40bit */
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index d88395684f487d..cef4a47ab06311 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -170,7 +170,7 @@ static inline dma_addr_t ar2315_dev_offset(struct device 
*dev)
return 0;
 }
 
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
return paddr + ar2315_dev_offset(dev);
 }
diff --git a/arc
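
The rest of this patch is truncated in the archive. For reference, the
resulting generic helpers in include/linux/dma-direct.h read roughly as
below; this is a sketch pieced together from the commit message and the
neighbouring patches, and it assumes __sme_set() from
<linux/mem_encrypt.h>:

static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is active, set the encryption bit in the returned
 * DMA address.  phys_to_dma_unencrypted() is for special unencrypted
 * memory such as swiotlb buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}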

[PATCH 02/12] MIPS/jazzdma: remove the unused vdma_remap function

2020-09-08 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
Acked-by: Thomas Bogendoerfer 
---
 arch/mips/include/asm/jazzdma.h |  2 -
 arch/mips/jazz/jazzdma.c| 70 -
 2 files changed, 72 deletions(-)

diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
index d13f940022d5f9..c831da7fa89803 100644
--- a/arch/mips/include/asm/jazzdma.h
+++ b/arch/mips/include/asm/jazzdma.h
@@ -10,8 +10,6 @@
  */
 extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
 extern int vdma_free(unsigned long laddr);
-extern int vdma_remap(unsigned long laddr, unsigned long paddr,
- unsigned long size);
 extern unsigned long vdma_phys2log(unsigned long paddr);
 extern unsigned long vdma_log2phys(unsigned long laddr);
 extern void vdma_stats(void);  /* for debugging only */
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 014773f0bfcd74..fe40dbed04c1d6 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -209,76 +209,6 @@ int vdma_free(unsigned long laddr)
 
 EXPORT_SYMBOL(vdma_free);
 
-/*
- * Map certain page(s) to another physical address.
- * Caller must have allocated the page(s) before.
- */
-int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
-{
-   int first, pages;
-
-   if (laddr > 0xff) {
-   if (vdma_debug)
-   printk
-   ("vdma_map: Invalid logical address: %08lx\n",
-laddr);
-   return -EINVAL; /* invalid logical address */
-   }
-   if (paddr > 0x1fff) {
-   if (vdma_debug)
-   printk
-   ("vdma_map: Invalid physical address: %08lx\n",
-paddr);
-   return -EINVAL; /* invalid physical address */
-   }
-
-   pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
-   first = laddr >> 12;
-   if (vdma_debug)
-   printk("vdma_remap: first=%x, pages=%x\n", first, pages);
-   if (first + pages > VDMA_PGTBL_ENTRIES) {
-   if (vdma_debug)
-   printk("vdma_alloc: Invalid size: %08lx\n", size);
-   return -EINVAL;
-   }
-
-   paddr &= ~(VDMA_PAGESIZE - 1);
-   while (pages > 0 && first < VDMA_PGTBL_ENTRIES) {
-   if (pgtbl[first].owner != laddr) {
-   if (vdma_debug)
-   printk("Trying to remap other's pages.\n");
-   return -EPERM;  /* not owner */
-   }
-   pgtbl[first].frame = paddr;
-   paddr += VDMA_PAGESIZE;
-   first++;
-   pages--;
-   }
-
-   /*
-* Update translation table
-*/
-   r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
-
-   if (vdma_debug > 2) {
-   int i;
-   pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
-   first = laddr >> 12;
-   printk("LADDR: ");
-   for (i = first; i < first + pages; i++)
-   printk("%08x ", i << 12);
-   printk("\nPADDR: ");
-   for (i = first; i < first + pages; i++)
-   printk("%08x ", pgtbl[i].frame);
-   printk("\nOWNER: ");
-   for (i = first; i < first + pages; i++)
-   printk("%08x ", pgtbl[i].owner);
-   printk("\n");
-   }
-
-   return 0;
-}
-
 /*
  * Translate a physical address to a logical address.
  * This will return the logical address of the first
-- 
2.28.0



[PATCH 09/12] dma-direct: remove __dma_to_phys

2020-09-08 Thread Christoph Hellwig
There is no harm in just always clearing the SME encryption bit, while
significantly simplifying the interface.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/include/asm/dma-direct.h  |  2 +-
 arch/mips/bmips/dma.c  |  2 +-
 arch/mips/cavium-octeon/dma-octeon.c   |  2 +-
 arch/mips/include/asm/dma-direct.h |  2 +-
 arch/mips/loongson2ef/fuloong-2e/dma.c |  2 +-
 arch/mips/loongson2ef/lemote-2f/dma.c  |  2 +-
 arch/mips/loongson64/dma.c |  2 +-
 arch/mips/pci/pci-ar2315.c |  2 +-
 arch/mips/pci/pci-xtalk-bridge.c   |  2 +-
 arch/mips/sgi-ip32/ip32-dma.c  |  2 +-
 arch/powerpc/include/asm/dma-direct.h  |  2 +-
 include/linux/dma-direct.h | 14 +-
 kernel/dma/direct.c|  6 +-
 13 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/arch/arm/include/asm/dma-direct.h 
b/arch/arm/include/asm/dma-direct.h
index 7c3001a6a775bf..a8cee87a93e8ab 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -8,7 +8,7 @@ static inline dma_addr_t __phys_to_dma(struct device *dev, 
phys_addr_t paddr)
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t 
dev_addr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
unsigned int offset = dev_addr & ~PAGE_MASK;
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index df56bf4179e347..ba2a5d33dfd3fa 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -52,7 +52,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
return pa;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
struct bmips_dma_range *r;
 
diff --git a/arch/mips/cavium-octeon/dma-octeon.c 
b/arch/mips/cavium-octeon/dma-octeon.c
index 14ea680d180e07..388b13ba2558c2 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -177,7 +177,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t 
paddr)
return paddr;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 #ifdef CONFIG_PCI
if (dev && dev_is_pci(dev))
diff --git a/arch/mips/include/asm/dma-direct.h 
b/arch/mips/include/asm/dma-direct.h
index 14e352651ce946..8e178651c638c2 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -3,6 +3,6 @@
 #define _MIPS_DMA_DIRECT_H 1
 
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
 #endif /* _MIPS_DMA_DIRECT_H */
diff --git a/arch/mips/loongson2ef/fuloong-2e/dma.c 
b/arch/mips/loongson2ef/fuloong-2e/dma.c
index e122292bf6660a..83fadeb3fd7d56 100644
--- a/arch/mips/loongson2ef/fuloong-2e/dma.c
+++ b/arch/mips/loongson2ef/fuloong-2e/dma.c
@@ -6,7 +6,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
return paddr | 0x8000;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
return dma_addr & 0x7fff;
 }
diff --git a/arch/mips/loongson2ef/lemote-2f/dma.c 
b/arch/mips/loongson2ef/lemote-2f/dma.c
index abf0e39d7e4696..302b43a14eee74 100644
--- a/arch/mips/loongson2ef/lemote-2f/dma.c
+++ b/arch/mips/loongson2ef/lemote-2f/dma.c
@@ -6,7 +6,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
return paddr | 0x8000;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
if (dma_addr > 0x8fff)
return dma_addr;
diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c
index dbfe6e82fddd1c..b3dc5d0bd2b113 100644
--- a/arch/mips/loongson64/dma.c
+++ b/arch/mips/loongson64/dma.c
@@ -13,7 +13,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t 
paddr)
return ((nid << 44) ^ paddr) | (nid << node_id_offset);
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
/* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
 * Loongson-3's 48bit address space and embed it into 40bit */
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index 490953f515282a..d88395684f487d 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -175,7 +175,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t 
paddr)
return paddr + ar2315_dev_offset(dev);
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_a
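
The generic include/linux/dma-direct.h hunk is cut off above. For
reference, the consolidated helper ends up looking roughly like this (a
sketch based on the commit message; __sme_clr() is assumed from
<linux/mem_encrypt.h>):

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	/* there is no harm in always clearing the SME encryption bit */
	return __sme_clr(paddr);
}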

[PATCH 12/12] dma-mapping: move the dma_declare_coherent_memory documentation

2020-09-08 Thread Christoph Hellwig
dma_declare_coherent_memory should not be in a DMA API guide aimed
at driver writers (that is, consumers of the API).  Move it to a comment
near the function instead.

Signed-off-by: Christoph Hellwig 
---
 Documentation/core-api/dma-api.rst | 24 
 kernel/dma/coherent.c  | 17 +
 2 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/Documentation/core-api/dma-api.rst 
b/Documentation/core-api/dma-api.rst
index 3b3a4b9a6f..90239348b30f6f 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -586,30 +586,6 @@ the DMA_ATTR_NON_CONSISTENT flag starting at virtual 
address vaddr and
 continuing on for size.  Again, you *must* observe the cache line
 boundaries when doing this.
 
-::
-
-   int
-   dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-   dma_addr_t device_addr, size_t size);
-
-Declare region of memory to be handed out by dma_alloc_coherent() when
-it's asked for coherent memory for this device.
-
-phys_addr is the CPU physical address to which the memory is currently
-assigned (this will be ioremapped so the CPU can access the region).
-
-device_addr is the DMA address the device needs to be programmed
-with to actually address this memory (this will be handed out as the
-dma_addr_t in dma_alloc_coherent()).
-
-size is the size of the area (must be multiples of PAGE_SIZE).
-
-As a simplification for the platforms, only *one* such region of
-memory may be declared per device.
-
-For reasons of efficiency, most platforms choose to track the declared
-region only at the granularity of a page.  For smaller allocations,
-you should use the dma_pool() API.
 
 Part III - Debug drivers use of the DMA-API
 ---
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 2a0c4985f38e41..f85d14bbfcbe03 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -107,6 +107,23 @@ static int dma_assign_coherent_memory(struct device *dev,
return 0;
 }
 
+/*
+ * Declare a region of memory to be handed out by dma_alloc_coherent() when it
+ * is asked for coherent memory for this device.  This shall only be used
+ * from platform code, usually based on the device tree description.
+ * 
+ * phys_addr is the CPU physical address to which the memory is currently
+ * assigned (this will be ioremapped so the CPU can access the region).
+ *
+ * device_addr is the DMA address the device needs to be programmed with to
+ * actually address this memory (this will be handed out as the dma_addr_t in
+ * dma_alloc_coherent()).
+ *
+ * size is the size of the area (must be a multiple of PAGE_SIZE).
+ *
+ * As a simplification for the platforms, only *one* such region of memory may
+ * be declared per device.
+ */
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size)
 {
-- 
2.28.0



[PATCH 06/12] dma-direct: remove dma_direct_{alloc,free}_pages

2020-09-08 Thread Christoph Hellwig
Just merge these helpers into the main dma_direct_{alloc,free} routines,
as the additional checks are always false for the two callers.

Signed-off-by: Christoph Hellwig 
---
 arch/x86/kernel/amd_gart_64.c |  6 +++---
 include/linux/dma-direct.h|  4 
 kernel/dma/direct.c   | 39 ++-
 kernel/dma/pool.c |  2 +-
 4 files changed, 19 insertions(+), 32 deletions(-)

diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index bccc5357bffd6c..153374b996a279 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -467,7 +467,7 @@ gart_alloc_coherent(struct device *dev, size_t size, 
dma_addr_t *dma_addr,
 {
void *vaddr;
 
-   vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
+   vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
if (!vaddr ||
!force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
return vaddr;
@@ -479,7 +479,7 @@ gart_alloc_coherent(struct device *dev, size_t size, 
dma_addr_t *dma_addr,
goto out_free;
return vaddr;
 out_free:
-   dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
+   dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
return NULL;
 }
 
@@ -489,7 +489,7 @@ gart_free_coherent(struct device *dev, size_t size, void 
*vaddr,
   dma_addr_t dma_addr, unsigned long attrs)
 {
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
-   dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
+   dma_direct_free(dev, size, vaddr, dma_addr, attrs);
 }
 
 static int no_agp;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 95e3e28bd93f47..20eceb2e4f91f8 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -77,10 +77,6 @@ void *dma_direct_alloc(struct device *dev, size_t size, 
dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-void *dma_direct_alloc_pages(struct device *dev, size_t size,
-   dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
-   dma_addr_t dma_addr, unsigned long attrs);
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 949c1cbf08b2d5..1d564bea58571b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -151,13 +151,18 @@ static struct page *__dma_direct_alloc_pages(struct 
device *dev, size_t size,
return page;
 }
 
-void *dma_direct_alloc_pages(struct device *dev, size_t size,
+void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
struct page *page;
void *ret;
int err;
 
+   if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+   !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+   dma_alloc_need_uncached(dev, attrs))
+   return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+
size = PAGE_ALIGN(size);
 
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
@@ -256,11 +261,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t 
size,
return NULL;
 }
 
-void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
-   dma_addr_t dma_addr, unsigned long attrs)
+void dma_direct_free(struct device *dev, size_t size,
+   void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
unsigned int page_order = get_order(size);
 
+   if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+   !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+   dma_alloc_need_uncached(dev, attrs)) {
+   arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
+   return;
+   }
+
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
if (dma_should_free_from_pool(dev, attrs) &&
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -284,27 +296,6 @@ void dma_direct_free_pages(struct device *dev, size_t 
size, void *cpu_addr,
dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
-void *dma_direct_alloc(struct device *dev, size_t size,
-   dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-   if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-   !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-   dma_alloc_need_uncached(dev, attrs))
-   return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
-   return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-}
-
-void dma_direct
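
The removal hunk is truncated above. The deleted code is the old
dma_direct_free() wrapper, which, mirroring the dma_direct_alloc()
wrapper removed just above, read roughly as follows (a reconstruction,
not a quote from the patch):

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}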

[PATCH 04/12] dma-mapping: fix DMA_OPS dependencies

2020-09-08 Thread Christoph Hellwig
Driver that select DMA_OPS need to depend on HAS_DMA support to
work.  The vop driver was missing that dependency, so add it, and also
add a nother depends in DMA_OPS itself.  That won't fix the issue due
to how the Kconfig dependencies work, but at least produce a warning
about unmet dependencies.

Signed-off-by: Christoph Hellwig 
---
 drivers/misc/mic/Kconfig | 1 +
 kernel/dma/Kconfig   | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index b9bb086785db48..8a7c2c5711d5f4 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -35,6 +35,7 @@ config SCIF_BUS
 
 config VOP_BUS
tristate "VOP Bus Driver"
+   depends on HAS_DMA
select DMA_OPS
help
  This option is selected by any driver which registers a
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 0ddfb5510fe45f..e7b801649f6574 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -9,6 +9,7 @@ config HAS_DMA
default y
 
 config DMA_OPS
+   depends on HAS_DMA
bool
 
 #
-- 
2.28.0



[PATCH 11/12] dma-mapping: move dma_common_{mmap, get_sgtable} out of mapping.c

2020-09-08 Thread Christoph Hellwig
Add a new file that contains helpers for misc DMA ops, which is only
built when CONFIG_DMA_OPS is set.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/Makefile  |  1 +
 kernel/dma/mapping.c | 47 +---
 kernel/dma/ops_helpers.c | 51 
 3 files changed, 53 insertions(+), 46 deletions(-)
 create mode 100644 kernel/dma/ops_helpers.c

diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 32c7c1942bbd6c..dc755ab68aabf9 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_HAS_DMA)  += mapping.o direct.o
+obj-$(CONFIG_DMA_OPS)  += ops_helpers.o
 obj-$(CONFIG_DMA_OPS)  += dummy.o
 obj-$(CONFIG_DMA_CMA)  += contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 0d129421e75fc8..848c95c27d79ff 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -8,7 +8,7 @@
 #include  /* for max_pfn */
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -295,22 +295,6 @@ void dma_sync_sg_for_device(struct device *dev, struct 
scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-void *cpu_addr, dma_addr_t dma_addr, size_t size,
-unsigned long attrs)
-{
-   struct page *page = virt_to_page(cpu_addr);
-   int ret;
-
-   ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-   if (!ret)
-   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-   return ret;
-}
-
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the
@@ -358,35 +342,6 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, 
unsigned long attrs)
 }
 #endif /* CONFIG_MMU */
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-   void *cpu_addr, dma_addr_t dma_addr, size_t size,
-   unsigned long attrs)
-{
-#ifdef CONFIG_MMU
-   unsigned long user_count = vma_pages(vma);
-   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   unsigned long off = vma->vm_pgoff;
-   int ret = -ENXIO;
-
-   vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
-
-   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-   return ret;
-
-   if (off >= count || user_count > count - off)
-   return -ENXIO;
-
-   return remap_pfn_range(vma, vma->vm_start,
-   page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
-   user_count << PAGE_SHIFT, vma->vm_page_prot);
-#else
-   return -ENXIO;
-#endif /* CONFIG_MMU */
-}
-
 /**
  * dma_can_mmap - check if a given device supports dma_mmap_*
  * @dev: device to check
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
new file mode 100644
index 00..e443c69be4299f
--- /dev/null
+++ b/kernel/dma/ops_helpers.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for DMA ops implementations.  These generally rely on the fact that
+ * the allocated memory contains normal pages in the direct kernel mapping.
+ */
+#include 
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+void *cpu_addr, dma_addr_t dma_addr, size_t size,
+unsigned long attrs)
+{
+   struct page *page = virt_to_page(cpu_addr);
+   int ret;
+
+   ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+   if (!ret)
+   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+   return ret;
+}
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+   unsigned long attrs)
+{
+#ifdef CONFIG_MMU
+   unsigned long user_count = vma_pages(vma);
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+   unsigned long off = vma->vm_pgoff;
+   int ret = -ENXIO;
+
+   vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+   return ret;
+
+   if (off >= count || user_count > count - off)
+   return -ENXIO;
+
+   return remap_pfn_range(vma, vma->vm_start,
+   page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+   user_count << PAGE_SHIFT, vma->vm_page_prot);
+#else
+   return -ENXIO;
+#endif /* CONFIG_MMU */
+}
-- 
2.28.0


[PATCH 01/12] MIPS: make dma_sync_*_for_cpu a little less overzealous

2020-09-08 Thread Christoph Hellwig
When transferring DMA ownership back to the CPU there should never
be any writeback from the cache, as the buffer was owned by the
device until now.  Instead it should just be invalidated for the
mapping directions where the device could have written data.
Note that the changes rely on the fact that kmap_atomic is stubbed
out for the !HIGHMEM case to simplify the code a bit.

Signed-off-by: Christoph Hellwig 
Acked-by: Thomas Bogendoerfer 
---
 arch/mips/mm/dma-noncoherent.c | 44 +-
 1 file changed, 28 insertions(+), 16 deletions(-)

diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 563c2c0d0c8193..97a14adbafc99c 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -55,22 +55,34 @@ void *arch_dma_set_uncached(void *addr, size_t size)
return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
enum dma_data_direction dir)
 {
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback((unsigned long)addr, size);
break;
-
case DMA_FROM_DEVICE:
dma_cache_inv((unsigned long)addr, size);
break;
-
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv((unsigned long)addr, size);
break;
+   default:
+   BUG();
+   }
+}
 
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+   enum dma_data_direction dir)
+{
+   switch (dir) {
+   case DMA_TO_DEVICE:
+   break;
+   case DMA_FROM_DEVICE:
+   case DMA_BIDIRECTIONAL:
+   dma_cache_inv((unsigned long)addr, size);
+   break;
default:
BUG();
}
@@ -82,7 +94,7 @@ static inline void dma_sync_virt(void *addr, size_t size,
  * configured then the bulk of this loop gets optimized out.
  */
 static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
-   enum dma_data_direction dir)
+   enum dma_data_direction dir, bool for_device)
 {
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long offset = paddr & ~PAGE_MASK;
@@ -90,18 +102,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t 
size,
 
do {
size_t len = left;
+   void *addr;
 
if (PageHighMem(page)) {
-   void *addr;
-
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
+   }
+
+   addr = kmap_atomic(page);
+   if (for_device)
+   dma_sync_virt_for_device(addr + offset, len, dir);
+   else
+   dma_sync_virt_for_cpu(addr + offset, len, dir);
+   kunmap_atomic(addr);
 
-   addr = kmap_atomic(page);
-   dma_sync_virt(addr + offset, len, dir);
-   kunmap_atomic(addr);
-   } else
-   dma_sync_virt(page_address(page) + offset, size, dir);
offset = 0;
page++;
left -= len;
@@ -111,7 +125,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t 
size,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
 {
-   dma_sync_phys(paddr, size, dir);
+   dma_sync_phys(paddr, size, dir, true);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -119,16 +133,14 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
 {
if (cpu_needs_post_dma_flush())
-   dma_sync_phys(paddr, size, dir);
+   dma_sync_phys(paddr, size, dir, false);
 }
 #endif
 
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
 {
-   BUG_ON(direction == DMA_NONE);
-
-   dma_sync_virt(vaddr, size, direction);
+   dma_sync_virt_for_device(vaddr, size, direction);
 }
 
 #ifdef CONFIG_DMA_PERDEV_COHERENT
-- 
2.28.0



[PATCH 08/12] dma-direct: use phys_to_dma_direct in dma_direct_alloc

2020-09-08 Thread Christoph Hellwig
Replace the currently open-coded copy.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/direct.c | 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 12e9f5f75dfe4b..57a6e7d7cf8f16 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -240,10 +240,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
goto out_encrypt_pages;
}
 done:
-   if (force_dma_unencrypted(dev))
-   *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-   else
-   *dma_handle = phys_to_dma(dev, page_to_phys(page));
+   *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return ret;
 
 out_encrypt_pages:
-- 
2.28.0
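
For reference, the helper this patch reuses read roughly as follows in
kernel/dma/direct.c at the time (reconstructed from the hunk above, so
treat it as a sketch rather than part of the patch):

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}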



Re: [PATCH v2 1/9] docs: Document IO Address Space ID (IOASID) APIs

2020-09-08 Thread Jacob Pan
On Mon, 7 Sep 2020 10:03:39 +0200
Auger Eric  wrote:

> Hi Jacob,
> 
> On 9/1/20 6:56 PM, Jacob Pan wrote:
> > Hi Eric,
> > 
> > On Thu, 27 Aug 2020 18:21:07 +0200
> > Auger Eric  wrote:
> >   
> >> Hi Jacob,
> >> On 8/24/20 12:32 PM, Jean-Philippe Brucker wrote:  
> >>> On Fri, Aug 21, 2020 at 09:35:10PM -0700, Jacob Pan wrote:
>  IOASID is used to identify address spaces that can be targeted by
>  device DMA. It is a system-wide resource that is essential to its
>  many users. This document is an attempt to help developers from
>  all vendors navigate the APIs. At this time, ARM SMMU and Intel’s
>  Scalable IO Virtualization (SIOV) enabled platforms are the
>  primary users of IOASID. Examples of how SIOV components interact
>  with IOASID APIs are provided in that many APIs are driven by the
>  requirements from SIOV.
> 
>  Signed-off-by: Liu Yi L 
>  Signed-off-by: Wu Hao 
>  Signed-off-by: Jacob Pan 
>  ---
>   Documentation/ioasid.rst | 618 +++
>  1 file changed, 618 insertions(+)
>  create mode 100644 Documentation/ioasid.rst
> 
>  diff --git a/Documentation/ioasid.rst b/Documentation/ioasid.rst
> >>>
> >>> Thanks for writing this up. Should it go to
> >>> Documentation/driver-api/, or Documentation/driver-api/iommu/? I
> >>> think this also needs to Cc linux-...@vger.kernel.org and
> >>> cor...@lwn.net   
>  new file mode 100644
>  index ..b6a8cdc885ff
>  --- /dev/null
>  +++ b/Documentation/ioasid.rst
>  @@ -0,0 +1,618 @@
>  +.. ioasid:
>  +
>  +=
>  +IO Address Space ID
>  +=
>  +
>  +IOASID is a generic name for PCIe Process Address ID (PASID) or
>  ARM +SMMU sub-stream ID. An IOASID identifies an address space
>  that DMA
> >>>
> >>> "SubstreamID"
> >> On ARM if we don't use PASIDs we have streamids (SID) which can also
> >> identify address spaces that DMA requests can target. So maybe this
> >> definition is not sufficient.
> >>  
> > According to SMMU spec, the SubstreamID is equivalent to PASID. My
> > understanding is that SID is equivalent to PCI requester ID that
> > identifies stage 2. Do you plan to use IOASID for stage 2?  
> No. So actually if PASID is not used we still have a default single
> IOASID matching the single context. So that may be fine as a definition.
OK, thanks for explaining.

> > IOASID is mostly for SVA and DMA request w/ PASID.
> >   
> >>> 
>  +requests can target.
>  +
>  +The primary use cases for IOASID are Shared Virtual Address (SVA)
>  and +IO Virtual Address (IOVA). However, the requirements for
>  IOASID
> >>>
> >>> IOVA alone isn't a use case, maybe "multiple IOVA spaces per
> >>> device"?   
>  +management can vary among hardware architectures.
>  +
>  +This document covers the generic features supported by IOASID
>  +APIs. Vendor-specific use cases are also illustrated with Intel's
>  VT-d +based platforms as the first example.
>  +
>  +.. contents:: :local:
>  +
>  +Glossary
>  +
>  +PASID - Process Address Space ID
>  +
>  +IOASID - IO Address Space ID (generic term for PCIe PASID and
>  +sub-stream ID in SMMU)
> >>>
> >>> "SubstreamID"
> >>> 
>  +
>  +SVA/SVM - Shared Virtual Addressing/Memory
>  +
>  +ENQCMD - New Intel X86 ISA for efficient workqueue submission
>  [1]
> >>>
> >>> Maybe drop the "New", to keep the documentation perennial. It might
> >>> be good to add internal links here to the specifications URLs at
> >>> the bottom.   
>  +
>  +DSA - Intel Data Streaming Accelerator [2]
>  +
>  +VDCM - Virtual device composition module [3]
>  +
>  +SIOV - Intel Scalable IO Virtualization
>  +
>  +
>  +Key Concepts
>  +
>  +
>  +IOASID Set
>  +---
>  +An IOASID set is a group of IOASIDs allocated from the system-wide
>  +IOASID pool. An IOASID set is created and can be identified by a
>  +token of u64. Refer to IOASID set APIs for more details.
> >>>
> >>> Identified either by an u64 or an mm_struct, right?  Maybe just
> >>> drop the second sentence if it's detailed in the IOASID set section
> >>> below.   
>  +
>  +IOASID set is particularly useful for guest SVA where each guest
>  could +have its own IOASID set for security and efficiency reasons.
>  +
>  +IOASID Set Private ID (SPID)
>  +
>  +SPIDs are introduced as IOASIDs within its set. Each SPID maps to
>  a +system-wide IOASID but the namespace of SPID is within its
>  IOASID +set.
> >>>
> >>> The intro isn't super clear. Perhaps this is simpler:
> >>> "Each IOASID set has a private namespace of SPIDs. An SPID maps to a
> >>> single system-wide IOAS

Re: [PATCH v8 0/7] IOMMU user API enhancement

2020-09-08 Thread Jacob Pan
Hi Joerg, Alex, and all.

Just wondering if there are any more comments? We have discussed this
at LPC, there are other patches depending on this set.

Thanks,

Jacob

On Mon, 31 Aug 2020 11:24:53 -0700
Jacob Pan  wrote:

> IOMMU user API header was introduced to support nested DMA
> translation and related fault handling. The current UAPI data
> structures consist of three areas that cover the interactions between
> host kernel and guest:
>  - fault handling
>  - cache invalidation
>  - bind guest page tables, i.e. guest PASID
> 
> Future extensions are likely to support more architectures and vIOMMU
> features.
> 
> In the previous discussion, using user-filled data size and feature
> flags is made a preferred approach over a unified version number.
> https://lkml.org/lkml/2020/1/29/45
> 
> In addition to introduce argsz field to data structures, this
> patchset is also trying to document the UAPI design, usage, and
> extension rules. VT-d driver changes to utilize the new argsz field
> is included, VFIO usage is to follow.
> 
> This set is available at:
> https://github.com/jacobpan/linux.git vsva_v5.8_uapi_v8
> 
> Thanks,
> 
> Jacob
> 
> 
> Changelog:
> v8
>   - Rebased to v5.9-rc2
>   - Addressed review comments from Eric Auger
> 1. added a check for the unused vendor flags
> 2. commit message improvements
> v7
>   - Added PASID data format enum for range checking
>   - Tidy up based on reviews from Alex W.
>   - Removed doc section for vIOMMU fault handling
> v6
>   - Renamed all UAPI functions with iommu_uapi_ prefix
>   - Replaced argsz maxsz checking with flag specific size checks
>   - Documentation improvements based on suggestions by Eric
> Auger Replaced example code with a pointer to the actual code
>   - Added more checks for illegal flags combinations
>   - Added doc file to MAINTAINERS
> v5
>   - Adjusted paddings in UAPI data to be 8-byte aligned
>   - Do not clobber argsz in IOMMU core before passing on to
> vendor driver
>   - Removed pr_warn_ for invalid UAPI data check, just return
> -EINVAL
>   - Clarified VFIO responsibility in UAPI data handling
>   - Use iommu_uapi prefix to differentiate APIs that have in-kernel
> callers
>   - Added comment for unchecked flags of invalidation
> granularity
>   - Added example in doc to show vendor data checking
> 
> v4
>   - Added checks of UAPI data for reserved fields, version, and
> flags.
>   - Removed version check from vendor driver (vt-d)
>   - Relaxed argsz check to match the UAPI struct size instead
> of variable union size
>   - Updated documentation
> 
> v3:
>   - Rewrote backward compatibility rule to support existing code
> re-compiled with newer kernel UAPI header that runs on older
> kernel. Based on review comment from Alex W.
> https://lore.kernel.org/linux-iommu/20200611094741.6d118...@w520.home/
>   - Take user pointer directly in UAPI functions. Perform argsz
> check and copy_from_user() in IOMMU driver. Eliminate the need for
> VFIO or other upper layer to parse IOMMU data.
>   - Create wrapper function for in-kernel users of UAPI
> functions
> 
> v2:
>   - Removed unified API version and helper
>   - Introduced argsz for each UAPI data
>   - Introduced UAPI doc
> 
> 
> Jacob Pan (7):
>   docs: IOMMU user API
>   iommu/uapi: Add argsz for user filled data
>   iommu/uapi: Introduce enum type for PASID data format
>   iommu/uapi: Use named union for user data
>   iommu/uapi: Rename uapi functions
>   iommu/uapi: Handle data and argsz filled by users
>   iommu/vt-d: Check UAPI data processed by IOMMU core
> 
>  Documentation/userspace-api/iommu.rst | 211 ++
>  MAINTAINERS                           |   1 +
>  drivers/iommu/intel/iommu.c           |  25 ++--
>  drivers/iommu/intel/svm.c             |  13 ++-
>  drivers/iommu/iommu.c                 | 205 +++--
>  include/linux/iommu.h                 |  35 --
>  include/uapi/linux/iommu.h            |  25 ++--
>  7 files changed, 470 insertions(+), 45 deletions(-)
>  create mode 100644 Documentation/userspace-api/iommu.rst
> 

[Jacob Pan]


Re: [PATCH 04/12] dma-mapping: fix DMA_OPS dependencies

2020-09-08 Thread Sergei Shtylyov
Hello!

On 9/8/20 7:47 PM, Christoph Hellwig wrote:

> Driver that select DMA_OPS need to depend on HAS_DMA support to
> work.  The vop driver was missing that dependency, so add it, and also
> add a nother depends in DMA_OPS itself.  That won't fix the issue due

   Another? :-)

> to how the Kconfig dependencies work, but at least produce a warning
> about unmet dependencies.
> 
> Signed-off-by: Christoph Hellwig 
[...]

MBR, Sergei


Re: [PATCH v3 1/8] iommu/arm-smmu: Refactor context bank allocation

2020-09-08 Thread Jordan Crouse
On Fri, Sep 04, 2020 at 03:55:06PM +, Bjorn Andersson wrote:
> Extract the conditional invocation of the platform defined
> alloc_context_bank() to a separate function to keep
> arm_smmu_init_domain_context() cleaner.
> 
> Instead pass a reference to the arm_smmu_device as parameter to the
> call. Also remove the count parameter, as this can be read from the
> newly passed object.
> 
> This allows us to not assign smmu_domain->smmu before attempting to
> allocate the context bank and as such we don't need to roll back this
> assignment on failure.

Much nicer.

Reviewed-by: Jordan Crouse 

> Signed-off-by: Bjorn Andersson 
> ---
> 
> Note that this series applies ontop of:
> https://lore.kernel.org/linux-arm-msm/20200901164707.2645413-1-robdcl...@gmail.com/
> 
> This could either go on its own, or be squashed with "[PATCH v16 14/20]
> iommu/arm-smmu: Prepare for the adreno-smmu implementation" from Rob's series.
> 
> Changes since v2:
> - New patch
> 
>  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c |  6 --
>  drivers/iommu/arm/arm-smmu/arm-smmu.c  | 23 --
>  drivers/iommu/arm/arm-smmu/arm-smmu.h  |  3 ++-
>  3 files changed, 19 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c 
> b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
> index 2aa6249050ff..0663d7d26908 100644
> --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
> +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
> @@ -91,9 +91,10 @@ static int qcom_adreno_smmu_set_ttbr0_cfg(const void 
> *cookie,
>  }
>  
>  static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain 
> *smmu_domain,
> - struct device *dev, int start, int count)
> +struct arm_smmu_device *smmu,
> +struct device *dev, int start)
>  {
> - struct arm_smmu_device *smmu = smmu_domain->smmu;
> + int count;
>  
>   /*
>* Assign context bank 0 to the GPU device so the GPU hardware can
> @@ -104,6 +105,7 @@ static int qcom_adreno_smmu_alloc_context_bank(struct 
> arm_smmu_domain *smmu_doma
>   count = 1;
>   } else {
>   start = 1;
> + count = smmu->num_context_banks;
>   }
>  
>   return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
> diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c 
> b/drivers/iommu/arm/arm-smmu/arm-smmu.c
> index bbec5793faf8..e19d7bdc7674 100644
> --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
> +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
> @@ -623,6 +623,16 @@ void arm_smmu_write_context_bank(struct arm_smmu_device 
> *smmu, int idx)
>   arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
>  }
>  
> +static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
> +struct arm_smmu_device *smmu,
> +struct device *dev, unsigned int start)
> +{
> + if (smmu->impl && smmu->impl->alloc_context_bank)
> + return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, 
> start);
> +
> + return __arm_smmu_alloc_bitmap(smmu->context_map, start, 
> smmu->num_context_banks);
> +}
> +
>  static int arm_smmu_init_domain_context(struct iommu_domain *domain,
>   struct arm_smmu_device *smmu,
>   struct device *dev)
> @@ -741,20 +751,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
>   goto out_unlock;
>   }
>  
> - smmu_domain->smmu = smmu;
> -
> - if (smmu->impl && smmu->impl->alloc_context_bank)
> - ret = smmu->impl->alloc_context_bank(smmu_domain, dev,
> - start, smmu->num_context_banks);
> - else
> - ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
> -   smmu->num_context_banks);
> -
> + ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
>   if (ret < 0) {
> - smmu_domain->smmu = NULL;
>   goto out_unlock;
>   }
>  
> + smmu_domain->smmu = smmu;
> +
>   cfg->cbndx = ret;
>   if (smmu->version < ARM_SMMU_V2) {
>   cfg->irptndx = atomic_inc_return(&smmu->irptndx);
> diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
> index 2df3a70a8a41..ddf2ca4c923d 100644
> --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
> +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
> @@ -437,7 +437,8 @@ struct arm_smmu_impl {
>   irqreturn_t (*global_fault)(int irq, void *dev);
>   irqreturn_t (*context_fault)(int irq, void *dev);
>   int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
> - struct device *dev, int start, int max);
> +   struct arm_smmu_device *smmu,
> +   struct device *dev, int start);
>  };
>  
>  #define I

Re: [PATCH v3 1/8] iommu/arm-smmu: Refactor context bank allocation

2020-09-08 Thread Jordan Crouse
On Tue, Sep 08, 2020 at 06:42:06PM +, Jordan Crouse wrote:
> On Fri, Sep 04, 2020 at 03:55:06PM +, Bjorn Andersson wrote:
> > Extract the conditional invocation of the platform defined
> > alloc_context_bank() to a separate function to keep
> > arm_smmu_init_domain_context() cleaner.
> > 
> > Instead pass a reference to the arm_smmu_device as parameter to the
> > call. Also remove the count parameter, as this can be read from the
> > newly passed object.
> > 
> > This allows us to not assign smmu_domain->smmu before attempting to
> > allocate the context bank and as such we don't need to roll back this
> > assignment on failure.
> 
> Much nicer.
> 
> Reviewed-by: Jordan Crouse 

I didn't notice that Rob had grabbed this one for his stack. That's fine too.

> > Signed-off-by: Bjorn Andersson 
> > [...]

Re: [PATCH v2 4/9] iommu/ioasid: Add reference counting functions

2020-09-08 Thread Jacob Pan
On Tue, 25 Aug 2020 12:19:37 +0200
Jean-Philippe Brucker  wrote:

> On Fri, Aug 21, 2020 at 09:35:13PM -0700, Jacob Pan wrote:
> > There can be multiple users of an IOASID, each user could have
> > hardware contexts associated with the IOASID. In order to align
> > lifecycles, reference counting is introduced in this patch. It is
> > expected that when an IOASID is being freed, each user will drop a
> > reference only after its context is cleared.
> > 
> > Signed-off-by: Jacob Pan 
> > ---
> >  drivers/iommu/ioasid.c | 113 +
> >  include/linux/ioasid.h |   4 ++
> >  2 files changed, 117 insertions(+)
> > 
> > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> > index f73b3dbfc37a..5f31d63c75b1 100644
> > --- a/drivers/iommu/ioasid.c
> > +++ b/drivers/iommu/ioasid.c
> > @@ -717,6 +717,119 @@ int ioasid_set_for_each_ioasid(struct
> > ioasid_set *set, EXPORT_SYMBOL_GPL(ioasid_set_for_each_ioasid);
> >  
> >  /**
> > + * IOASID refcounting rules
> > + * - ioasid_alloc() set initial refcount to 1
> > + *
> > + * - ioasid_free() decrement and test refcount.
> > + * If refcount is 0, ioasid will be freed. Deleted from the system-wide
> > + * xarray as well as per set xarray. The IOASID will be returned to the
> > + * pool and available for new allocations.
> > + *
> > + * If recount is non-zero, mark IOASID as IOASID_STATE_FREE_PENDING.
> > + * No new reference can be added. The IOASID is not returned to the pool
> > + * for reuse.
> > + * After free, ioasid_get() will return error but ioasid_find() and other
> > + * non refcount adding APIs will continue to work until the last reference
> > + * is dropped
> > + *
> > + * - ioasid_get() get a reference on an active IOASID
> > + *
> > + * - ioasid_put() decrement and test refcount of the IOASID.
> > + * If refcount is 0, ioasid will be freed. Deleted from the system-wide
> > + * xarray as well as per set xarray. The IOASID will be returned to the
> > + * pool and available for new allocations.
> > + * Do nothing if refcount is non-zero.
> > + *
> > + * - ioasid_find() does not take reference, caller must hold reference
> > + *
> > + * ioasid_free() can be called multiple times without error until all refs are
> > + * dropped.
> > + */  
> 
> Since you already document this in ioasid.rst, I'm not sure the
> comment is necessary. Maybe the doc for _free/_put would be better in
> the function's documentation.
> 
good point. will move the free/put to doc.

> > +
> > +int ioasid_get_locked(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   struct ioasid_data *data;
> > +
> > +   data = xa_load(&active_allocator->xa, ioasid);
> > +   if (!data) {
> > +   pr_err("Trying to get unknown IOASID %u\n", ioasid);
> > +   return -EINVAL;
> > +   }
> > +   if (data->state == IOASID_STATE_FREE_PENDING) {
> > +   pr_err("Trying to get IOASID being freed%u\n", ioasid);
> > +   return -EBUSY;
> > +   }
> > +
> > +   if (set && data->set != set) {
> > +   pr_err("Trying to get IOASID not in set%u\n", ioasid);
> > +   /* data found but does not belong to the set */
> > +   return -EACCES;
> > +   }
> > +   refcount_inc(&data->users);
> > +
> > +   return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_get_locked);  
> 
> Is it necessary to export the *_locked variant?  Who'd call them and
> how would they acquire the lock?
> 
It is used by KVM inside the atomic notifier handler; the notifier
chain is invoked under the lock.
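
For illustration, the caller has roughly this shape (a sketch only --
the event code and the KVM-side callback are hypothetical, not part of
this series):

/*
 * Runs from an atomic notifier chain that is invoked with
 * ioasid_allocator_lock already held, hence the _locked variant.
 */
static int kvm_ioasid_notifier(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	ioasid_t ioasid = *(ioasid_t *)data;

	if (event == IOASID_NOTIFY_FREE)	/* hypothetical event code */
		ioasid_put_locked(NULL, ioasid);	/* NULL set skips ownership check */

	return NOTIFY_OK;
}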

> > +
> > +/**
> > + * ioasid_get - Obtain a reference of an ioasid
> > + * @set
> > + * @ioasid  
> 
> Can be dropped. The doc checker will throw a warning, though.
> 
yes, will do

> > + *
> > + * Check set ownership if @set is non-null.
> > + */
> > +int ioasid_get(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   int ret = 0;  
> 
> No need to initialize ret
> 
right,

> > +
> > +   spin_lock(&ioasid_allocator_lock);
> > +   ret = ioasid_get_locked(set, ioasid);
> > +   spin_unlock(&ioasid_allocator_lock);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_get);
> > +
> > +void ioasid_put_locked(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   struct ioasid_data *data;
> > +
> > +   data = xa_load(&active_allocator->xa, ioasid);
> > +   if (!data) {
> > +   pr_err("Trying to put unknown IOASID %u\n", ioasid);
> > +   return;
> > +   }
> > +
> > +   if (set && data->set != set) {
> > +   pr_err("Trying to drop IOASID not in the set %u\n", ioasid);
> > +   return;
> > +   }
> > +
> > +   if (!refcount_dec_and_test(&data->users)) {
> > +   pr_debug("%s: IOASID %d has %d remaining users\n",
> > +   __func__, ioasid, refcount_read(&data->users));
> > +   return;
> > +   }
> > +   ioasid_do_free(data);
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_put_locked);
> > +
>

Re: [PATCH v2 4/9] iommu/ioasid: Add reference counting functions

2020-09-08 Thread Jacob Pan
On Tue, 1 Sep 2020 14:13:00 +0200
Auger Eric  wrote:

> Hi Jacob,
> 
> On 8/22/20 6:35 AM, Jacob Pan wrote:
> > There can be multiple users of an IOASID, each user could have
> > hardware contexts associated with the IOASID. In order to align
> > lifecycles, reference counting is introduced in this patch. It is
> > expected that when an IOASID is being freed, each user will drop a
> > reference only after its context is cleared.
> > 
> > Signed-off-by: Jacob Pan 
> > ---
> >  drivers/iommu/ioasid.c | 113 +
> >  include/linux/ioasid.h |   4 ++
> >  2 files changed, 117 insertions(+)
> > 
> > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> > index f73b3dbfc37a..5f31d63c75b1 100644
> > --- a/drivers/iommu/ioasid.c
> > +++ b/drivers/iommu/ioasid.c
> > @@ -717,6 +717,119 @@ int ioasid_set_for_each_ioasid(struct
> > ioasid_set *set, EXPORT_SYMBOL_GPL(ioasid_set_for_each_ioasid);
> >  
> >  /**
> > + * IOASID refcounting rules
> > + * - ioasid_alloc() set initial refcount to 1
> > + *
> > + * - ioasid_free() decrement and test refcount.
> > + * If refcount is 0, ioasid will be freed. Deleted from the system-wide
> > + * xarray as well as per set xarray. The IOASID will be returned to the
> > + * pool and available for new allocations.
> > + *
> > + * If recount is non-zero, mark IOASID as IOASID_STATE_FREE_PENDING.  
> s/recount/refcount
> > + * No new reference can be added. The IOASID is not returned to the pool  
> can be taken
will do. and move to doc as Jean suggested.

> > + * for reuse.
> > + * After free, ioasid_get() will return error but ioasid_find() and other
> > + * non refcount adding APIs will continue to work until the last reference
> > + * is dropped
> > + *
> > + * - ioasid_get() get a reference on an active IOASID
> > + *
> > + * - ioasid_put() decrement and test refcount of the IOASID.
> > + * If refcount is 0, ioasid will be freed. Deleted from the system-wide
> > + * xarray as well as per set xarray. The IOASID will be returned to the
> > + * pool and available for new allocations.
> > + * Do nothing if refcount is non-zero.  
> I would drop this last sentence
will do.

> > + *
> > + * - ioasid_find() does not take reference, caller must hold
> > reference  
> So can you use ioasid_find() once the state is
> IOASID_STATE_FREE_PENDING? According to Jean's reply, the "IOASID may
> be freed once ioasid_find() returns but not the returned data." So
> holding a ref is not mandated right?
right, you can still find the private data in FREE_PENDING state. I
will drop the comment.

> > + *
> > + * ioasid_free() can be called multiple times without error until
> > all refs are
> > + * dropped.
> > + */
> > +
> > +int ioasid_get_locked(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   struct ioasid_data *data;
> > +
> > +   data = xa_load(&active_allocator->xa, ioasid);
> > +   if (!data) {
> > +   pr_err("Trying to get unknown IOASID %u\n", ioasid);
> > +   return -EINVAL;
> > +   }
> > +   if (data->state == IOASID_STATE_FREE_PENDING) {
> > +   pr_err("Trying to get IOASID being freed%u\n", ioasid);
> > +   return -EBUSY;
> > +   }
> > +
> > +   if (set && data->set != set) {
> > +   pr_err("Trying to get IOASID not in set%u\n", ioasid);  
> maybe try to normalize your traces using always the same formatting
> for ioasids and ioasid sets. Also we can understand %u is the id of
> the set.
it is confusing, this is not the set ID. I will change to:
pr_err("Trying to get IOASID %u outside the set\n", ioasid);

> > +   /* data found but does not belong to the set */  
> you can get rid of this comment
> > +   return -EACCES;
> > +   }
> > +   refcount_inc(&data->users);
> > +
> > +   return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_get_locked);
> > +
> > +/**
> > + * ioasid_get - Obtain a reference of an ioasid
> > + * @set
> > + * @ioasid
> > + *
> > + * Check set ownership if @set is non-null.
> > + */
> > +int ioasid_get(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   int ret = 0;
> > +
> > +   spin_lock(&ioasid_allocator_lock);
> > +   ret = ioasid_get_locked(set, ioasid);
> > +   spin_unlock(&ioasid_allocator_lock);
> > +
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_get);
> > +
> > +void ioasid_put_locked(struct ioasid_set *set, ioasid_t ioasid)
> > +{
> > +   struct ioasid_data *data;
> > +
> > +   data = xa_load(&active_allocator->xa, ioasid);
> > +   if (!data) {
> > +   pr_err("Trying to put unknown IOASID %u\n", ioasid);
> > +   return;
> > +   }
> > +
> > +   if (set && data->set != set) {
> > +   pr_err("Trying to drop IOASID not in the set %u\n", ioasid);  
> was set%u above
yes, same change below?
pr_err("Trying to drop IOASID %u outside the set\n", ioasid);

> > +   return;
> > +   }
> > +
> > +   if (!refcount_dec_an

Re: [PATCH v2 5/9] iommu/ioasid: Introduce ioasid_set private ID

2020-09-08 Thread Jacob Pan
On Tue, 25 Aug 2020 12:22:09 +0200
Jean-Philippe Brucker  wrote:

> On Fri, Aug 21, 2020 at 09:35:14PM -0700, Jacob Pan wrote:
> > When an IOASID set is used for guest SVA, each VM will acquire its
> > ioasid_set for IOASID allocations. IOASIDs within the VM must have a
> > host/physical IOASID backing, mapping between guest and host IOASIDs can
> > be non-identical. IOASID set private ID (SPID) is introduced in this
> > patch to be used as guest IOASID. However, the concept of ioasid_set
> > specific namespace is generic, thus named SPID.
> > 
> > As SPID namespace is within the IOASID set, the IOASID core can provide
> > lookup services at both directions. SPIDs may not be allocated when its
> > IOASID is allocated, the mapping between SPID and IOASID is usually
> > established when a guest page table is bound to a host PASID.
> > 
> > Signed-off-by: Jacob Pan 
> > ---
> >  drivers/iommu/ioasid.c | 54 ++
> >  include/linux/ioasid.h | 12 +++
> >  2 files changed, 66 insertions(+)
> > 
> > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> > index 5f31d63c75b1..c0aef38a4fde 100644
> > --- a/drivers/iommu/ioasid.c
> > +++ b/drivers/iommu/ioasid.c
> > @@ -21,6 +21,7 @@ enum ioasid_state {
> >   * struct ioasid_data - Meta data about ioasid
> >   *
> >   * @id:Unique ID
> > + * @spid:  Private ID unique within a set
> >   * @users  Number of active users
> >   * @state  Track state of the IOASID
> >   * @setMeta data of the set this IOASID belongs to
> > @@ -29,6 +30,7 @@ enum ioasid_state {
> >   */
> >  struct ioasid_data {
> > ioasid_t id;
> > +   ioasid_t spid;
> > struct ioasid_set *set;
> > refcount_t users;
> > enum ioasid_state state;
> > @@ -326,6 +328,58 @@ int ioasid_attach_data(ioasid_t ioasid, void *data)
> >  EXPORT_SYMBOL_GPL(ioasid_attach_data);
> >  
> >  /**
> > + * ioasid_attach_spid - Attach ioasid_set private ID to an IOASID
> > + *
> > + * @ioasid: the ID to attach
> > + * @spid:   the ioasid_set private ID of @ioasid
> > + *
> > + * For IOASID that is already allocated, private ID within the set can be
> > + * attached via this API. Future lookup can be done via ioasid_find.  
> 
> via ioasid_find_by_spid()?
> 
yes, will update.

> > + */
> > +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
> > +{
> > +   struct ioasid_data *ioasid_data;
> > +   int ret = 0;
> > +
> > +   spin_lock(&ioasid_allocator_lock);
> > +   ioasid_data = xa_load(&active_allocator->xa, ioasid);
> > +
> > +   if (!ioasid_data) {
> > +   pr_err("No IOASID entry %d to attach SPID %d\n",
> > +   ioasid, spid);
> > +   ret = -ENOENT;
> > +   goto done_unlock;
> > +   }
> > +   ioasid_data->spid = spid;
> > +
> > +done_unlock:
> > +   spin_unlock(&ioasid_allocator_lock);
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_attach_spid);
> > +
> > +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t spid)  
> 
> Maybe add a bit of documentation as this is public-facing.
> 
Good point, I will add
/**
 * ioasid_find_by_spid - Find the system-wide IOASID by a set private ID and
 * its set.
 *
 * @set:the ioasid_set to search within
 * @spid:   the set private ID
 *
 * Given a set private ID and its IOASID set, find the system-wide IOASID. Take
 * a reference upon finding the matching IOASID. Return INVALID_IOASID if the
 * IOASID is not found in the set or the set is not valid.
 */

> > +{
> > +   struct ioasid_data *entry;
> > +   unsigned long index;
> > +
> > +   if (!xa_load(&ioasid_sets, set->sid)) {
> > +   pr_warn("Invalid set\n");
> > +   return INVALID_IOASID;
> > +   }
> > +
> > +   xa_for_each(&set->xa, index, entry) {
> > +   if (spid == entry->spid) {
> > +   pr_debug("Found ioasid %lu by spid %u\n", index, spid);
> > +   refcount_inc(&entry->users);  
> 
> Nothing prevents ioasid_free() from concurrently dropping the refcount to
> zero and calling ioasid_do_free(). The caller will later call ioasid_put()
> on a stale/reallocated index.
> 
right, need to add  spin_lock(&ioasid_allocator_lock);
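
Something along these lines, taking the lock around both the walk and
the refcount bump (a sketch against the code above):

ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t spid)
{
	struct ioasid_data *entry;
	unsigned long index;
	ioasid_t ioasid = INVALID_IOASID;

	spin_lock(&ioasid_allocator_lock);
	if (!xa_load(&ioasid_sets, set->sid)) {
		pr_warn("Invalid set\n");
		goto done_unlock;
	}

	xa_for_each(&set->xa, index, entry) {
		if (spid == entry->spid) {
			/* take the reference before a concurrent free can drop it to 0 */
			refcount_inc(&entry->users);
			ioasid = index;
			break;
		}
	}
done_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ioasid;
}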

> > +   return index;
> > +   }
> > +   }
> > +   return INVALID_IOASID;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_find_by_spid);
> > +
> > +/**
> >   * ioasid_alloc - Allocate an IOASID
> >   * @set: the IOASID set
> >   * @min: the minimum ID (inclusive)
> > diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
> > index 310abe4187a3..d4b3e83672f6 100644
> > --- a/include/linux/ioasid.h
> > +++ b/include/linux/ioasid.h
> > @@ -73,6 +73,8 @@ bool ioasid_is_active(ioasid_t ioasid);
> >  
> >  void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool 
> > (*getter)(void *));
> >  int ioasid_attach_data(ioasid_t ioasid, void *data);
> > +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid);
> > +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, 

Re: [PATCH v2 5/9] iommu/ioasid: Introduce ioasid_set private ID

2020-09-08 Thread Jacob Pan
On Tue, 1 Sep 2020 17:38:44 +0200
Auger Eric  wrote:

> Hi Jacob,
> On 8/22/20 6:35 AM, Jacob Pan wrote:
> > When an IOASID set is used for guest SVA, each VM will acquire its
> > ioasid_set for IOASID allocations. IOASIDs within the VM must have a
> > host/physical IOASID backing, mapping between guest and host
> > IOASIDs can be non-identical. IOASID set private ID (SPID) is
> > introduced in this patch to be used as guest IOASID. However, the
> > concept of ioasid_set specific namespace is generic, thus named
> > SPID.
> > 
> > As SPID namespace is within the IOASID set, the IOASID core can
> > provide lookup services at both directions. SPIDs may not be
> > allocated when its IOASID is allocated, the mapping between SPID
> > and IOASID is usually established when a guest page table is bound
> > to a host PASID.
> > 
> > Signed-off-by: Jacob Pan 
> > ---
> >  drivers/iommu/ioasid.c | 54 ++
> >  include/linux/ioasid.h | 12 +++
> >  2 files changed, 66 insertions(+)
> > 
> > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> > index 5f31d63c75b1..c0aef38a4fde 100644
> > --- a/drivers/iommu/ioasid.c
> > +++ b/drivers/iommu/ioasid.c
> > @@ -21,6 +21,7 @@ enum ioasid_state {
> >   * struct ioasid_data - Meta data about ioasid
> >   *
> >   * @id:Unique ID
> > + * @spid:  Private ID unique within a set
> >   * @users  Number of active users
> >   * @state  Track state of the IOASID
> >   * @setMeta data of the set this IOASID belongs to
> > @@ -29,6 +30,7 @@ enum ioasid_state {
> >   */
> >  struct ioasid_data {
> > ioasid_t id;
> > +   ioasid_t spid;
> > struct ioasid_set *set;
> > refcount_t users;
> > enum ioasid_state state;
> > @@ -326,6 +328,58 @@ int ioasid_attach_data(ioasid_t ioasid, void
> > *data) EXPORT_SYMBOL_GPL(ioasid_attach_data);
> >  
> >  /**
> > + * ioasid_attach_spid - Attach ioasid_set private ID to an IOASID
> > + *
> > + * @ioasid: the ID to attach
> > + * @spid:   the ioasid_set private ID of @ioasid
> > + *
> > + * For IOASID that is already allocated, private ID within the set can be
> > + * attached via this API. Future lookup can be done via ioasid_find.  
> I would remove "For IOASID that is already allocated, private ID
> within the set can be attached via this API"
I guess it is implied. Will remove.

> > + */
> > +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
> > +{
> > +   struct ioasid_data *ioasid_data;
> > +   int ret = 0;
> > +
> > +   spin_lock(&ioasid_allocator_lock);  
> We keep on saying the SPID is local to an IOASID set but we don't
> check any IOASID set contains this ioasid. It looks a bit weird to me.
We store ioasid_set inside ioasid_data when an IOASID is allocated, so
we don't need to search all the ioasid_sets. Perhaps I missed your
point?

> > +   ioasid_data = xa_load(&active_allocator->xa, ioasid);
> > +
> > +   if (!ioasid_data) {
> > +   pr_err("No IOASID entry %d to attach SPID %d\n",
> > +   ioasid, spid);
> > +   ret = -ENOENT;
> > +   goto done_unlock;
> > +   }
> > +   ioasid_data->spid = spid;  
> is there any way/need to remove an spid binding?
For guest SVA, we attach SPID as a guest PASID during bind guest page
table. Unbind does the opposite, ioasid_attach_spid() with
spid=INVALID_IOASID clears the binding.

Perhaps add more symmetric functions? i.e.
ioasid_detach_spid(ioasid_t ioasid)
ioasid_attach_spid(struct ioasid_set *set, ioasid_t ioasid)
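
With the current semantics the detach side could be a trivial wrapper
(sketch):

void ioasid_detach_spid(ioasid_t ioasid)
{
	/* an SPID of INVALID_IOASID clears the binding, as described above */
	ioasid_attach_spid(ioasid, INVALID_IOASID);
}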

> > +
> > +done_unlock:
> > +   spin_unlock(&ioasid_allocator_lock);
> > +   return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_attach_spid);
> > +
> > +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t spid)
> > +{
> > +   struct ioasid_data *entry;
> > +   unsigned long index;
> > +
> > +   if (!xa_load(&ioasid_sets, set->sid)) {
> > +   pr_warn("Invalid set\n");
> > +   return INVALID_IOASID;
> > +   }
> > +
> > +   xa_for_each(&set->xa, index, entry) {
> > +   if (spid == entry->spid) {
> > +   pr_debug("Found ioasid %lu by spid %u\n", index, spid);
> > +   refcount_inc(&entry->users);
> > +   return index;
> > +   }
> > +   }
> > +   return INVALID_IOASID;
> > +}
> > +EXPORT_SYMBOL_GPL(ioasid_find_by_spid);
> > +
> > +/**
> >   * ioasid_alloc - Allocate an IOASID
> >   * @set: the IOASID set
> >   * @min: the minimum ID (inclusive)
> > diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
> > index 310abe4187a3..d4b3e83672f6 100644
> > --- a/include/linux/ioasid.h
> > +++ b/include/linux/ioasid.h
> > @@ -73,6 +73,8 @@ bool ioasid_is_active(ioasid_t ioasid);
> >  
> >  void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool
> > (*getter)(void *)); int ioasid_attach_data(ioasid_t ioasid, void
> > *data); +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid);
> > +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t
> >

Re: [Intel-gfx] [PATCH 0/8] Convert the intel iommu driver to the dma-iommu api

2020-09-08 Thread Tom Murphy
On Tue, 8 Sep 2020 at 16:56, Tvrtko Ursulin
 wrote:
>
>
> On 08/09/2020 16:44, Logan Gunthorpe wrote:
> > On 2020-09-08 9:28 a.m., Tvrtko Ursulin wrote:
> >>>
> >>> diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h
> >>> b/drivers/gpu/drm/i915/i915
> >>> index b7b59328cb76..9367ac801f0c 100644
> >>> --- a/drivers/gpu/drm/i915/i915_scatterlist.h
> >>> +++ b/drivers/gpu/drm/i915/i915_scatterlist.h
> >>> @@ -27,13 +27,19 @@ static __always_inline struct sgt_iter {
> >>>} __sgt_iter(struct scatterlist *sgl, bool dma) {
> >>>   struct sgt_iter s = { .sgp = sgl };
> >>>
> >>> +   if (sgl && !sg_dma_len(s.sgp))
> >>
> >> I'd extend the condition to be, just to be safe:
> >>  if (dma && sgl && !sg_dma_len(s.sgp))
> >>
> >
> > Right, good catch, that's definitely necessary.
> >
> >>> +   s.sgp = NULL;
> >>> +
> >>>   if (s.sgp) {
> >>>   s.max = s.curr = s.sgp->offset;
> >>> -   s.max += s.sgp->length;
> >>> -   if (dma)
> >>> +
> >>> +   if (dma) {
> >>> +   s.max += sg_dma_len(s.sgp);
> >>>   s.dma = sg_dma_address(s.sgp);
> >>> -   else
> >>> +   } else {
> >>> +   s.max += s.sgp->length;
> >>>   s.pfn = page_to_pfn(sg_page(s.sgp));
> >>> +   }
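> >
> > Folded together with the extended check, the head of __sgt_iter would
> > end up roughly like this (a sketch, untested):
> >
> > 	struct sgt_iter s = { .sgp = sgl };
> >
> > 	/* a zero DMA length terminates the DMA-mapped view of the list */
> > 	if (dma && sgl && !sg_dma_len(s.sgp))
> > 		s.sgp = NULL;
> >
> > 	if (s.sgp) {
> > 		s.max = s.curr = s.sgp->offset;
> > 		if (dma) {
> > 			s.max += sg_dma_len(s.sgp);
> > 			s.dma = sg_dma_address(s.sgp);
> > 		} else {
> > 			s.max += s.sgp->length;
> > 			s.pfn = page_to_pfn(sg_page(s.sgp));
> > 		}
> > 	}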
> >>
> >> Otherwise has this been tested or alternatively how to test it? (How to
> >> repro the issue.)
> >
> > It has not been tested. To test it, you need Tom's patch set without the
> > last "DO NOT MERGE" patch:
> >
> > https://lkml.kernel.org/lkml/20200907070035.ga25...@infradead.org/T/
>
> Tom, do you have a branch somewhere I could pull from? (Just being lazy
> about downloading a bunch of messages from the archives.)

I don't unfortunately. I'm working locally with poor internet.

>
> What GPU is in your Lenovo x1 carbon 5th generation and what
> graphical/desktop setup I need to repro?


Is this enough info?:

$ lspci -vnn | grep VGA -A 12
00:02.0 VGA compatible controller [0300]: Intel Corporation HD
Graphics 620 [8086:5916] (rev 02) (prog-if 00 [VGA controller])
Subsystem: Lenovo ThinkPad X1 Carbon 5th Gen [17aa:224f]
Flags: bus master, fast devsel, latency 0, IRQ 148
Memory at eb00 (64-bit, non-prefetchable) [size=16M]
Memory at 6000 (64-bit, prefetchable) [size=256M]
I/O ports at e000 [size=64]
[virtual] Expansion ROM at 000c [disabled] [size=128K]
Capabilities: [40] Vendor Specific Information: Len=0c 
Capabilities: [70] Express Root Complex Integrated Endpoint, MSI 00
Capabilities: [ac] MSI: Enable+ Count=1/1 Maskable- 64bit-
Capabilities: [d0] Power Management version 2
Capabilities: [100] Process Address Space ID (PASID)
Capabilities: [200] Address Translation Service (ATS)


>
> Regards,
>
> Tvrtko


Re: [PATCH v8 6/7] iommu/uapi: Handle data and argsz filled by users

2020-09-08 Thread Alex Williamson
On Mon, 31 Aug 2020 11:24:59 -0700
Jacob Pan  wrote:

> IOMMU user APIs are responsible for processing user data. This patch
> changes the interface such that user pointers can be passed into IOMMU
> code directly. Separate kernel APIs without user pointers are introduced
> for in-kernel users of the UAPI functionality.
> 
> IOMMU UAPI data has a user filled argsz field which indicates the data
> length of the structure. User data is not trusted, argsz must be
> validated based on the current kernel data size, mandatory data size,
> and feature flags.
> 
> User data may also be extended, resulting in possible argsz increase.
> Backward compatibility is ensured based on size and flags (or
> the functional equivalent fields) checking.
> 
> This patch adds sanity checks in the IOMMU layer. In addition to argsz,
> reserved/unused fields in padding, flags, and version are also checked.
> Details are documented in Documentation/userspace-api/iommu.rst
> 
> Signed-off-by: Liu Yi L 
> Signed-off-by: Jacob Pan 
> ---
>  drivers/iommu/iommu.c | 201 --
>  include/linux/iommu.h |  28 ---
>  2 files changed, 212 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 4ae02291ccc2..3bc263ae31ed 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -1961,33 +1961,218 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
>  }
>  EXPORT_SYMBOL_GPL(iommu_attach_device);
>  
> +/*
> + * Check flags and other user provided data for valid combinations. We also
> + * make sure no reserved fields or unused flags are set. This is to ensure
> + * not breaking userspace in the future when these fields or flags are used.
> + */
> +static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
> +{
> + u32 mask;
> + int i;
> +
> + if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
> + return -EINVAL;
> +
> + mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
> + if (info->cache & ~mask)
> + return -EINVAL;
> +
> + if (info->granularity >= IOMMU_INV_GRANU_NR)
> + return -EINVAL;
> +
> + switch (info->granularity) {
> + case IOMMU_INV_GRANU_ADDR:
> + if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
> + return -EINVAL;
> +
> + mask = IOMMU_INV_ADDR_FLAGS_PASID |
> + IOMMU_INV_ADDR_FLAGS_ARCHID |
> + IOMMU_INV_ADDR_FLAGS_LEAF;
> +
> + if (info->granu.addr_info.flags & ~mask)
> + return -EINVAL;
> + break;
> + case IOMMU_INV_GRANU_PASID:
> + mask = IOMMU_INV_PASID_FLAGS_PASID |
> + IOMMU_INV_PASID_FLAGS_ARCHID;
> + if (info->granu.pasid_info.flags & ~mask)
> + return -EINVAL;
> +
> + break;
> + case IOMMU_INV_GRANU_DOMAIN:
> + if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
> + return -EINVAL;
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + /* Check reserved padding fields */
> + for (i = 0; i < sizeof(info->padding); i++) {
> + if (info->padding[i])
> + return -EINVAL;
> + }
> +
> + return 0;
> +}
> +
>  int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
> - struct iommu_cache_invalidate_info *inv_info)
> + void __user *uinfo)
>  {
> + struct iommu_cache_invalidate_info inv_info = { 0 };
> + u32 minsz;
> + int ret = 0;
> +
>   if (unlikely(!domain->ops->cache_invalidate))
>   return -ENODEV;
>  
> - return domain->ops->cache_invalidate(domain, dev, inv_info);
> + /*
> +  * No new spaces can be added before the variable sized union, the
> +  * minimum size is the offset to the union.
> +  */
> + minsz = offsetof(struct iommu_cache_invalidate_info, granu);
> +
> + /* Copy minsz from user to get flags and argsz */
> + if (copy_from_user(&inv_info, uinfo, minsz))
> + return -EFAULT;
> +
> + /* Fields before variable size union is mandatory */
> + if (inv_info.argsz < minsz)
> + return -EINVAL;
> +
> + /* PASID and address granu require additional info beyond minsz */
> + if (inv_info.argsz == minsz &&
> + ((inv_info.granularity == IOMMU_INV_GRANU_PASID) ||
> + (inv_info.granularity == IOMMU_INV_GRANU_ADDR)))
> + return -EINVAL;
> +
> + if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
> + inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
> + return -EINVAL;
> +
> + if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
> + inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, 
> gr
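
[For reference, the contract these checks enforce on the user side is
that argsz describes how much of the struct the caller actually filled
in -- e.g., hypothetically:

	struct iommu_cache_invalidate_info inv = {
		.argsz		= sizeof(inv),
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_DOMAIN,
	};

An older binary built against a smaller struct passes its smaller
sizeof() in argsz, and the kernel copies and validates only that much.]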

Re: [PATCH V2 2/5] iommu: Add iommu_dma_free_cpu_cached_iovas function to dma-iommu ops

2020-09-08 Thread Lu Baolu

On 9/4/20 4:18 AM, Tom Murphy wrote:

Add an iommu_dma_free_cpu_cached_iovas function to allow drivers which
use the dma-iommu ops to free cached cpu iovas.

Signed-off-by: Tom Murphy 
---
  drivers/iommu/dma-iommu.c | 9 +
  include/linux/dma-iommu.h | 3 +++
  2 files changed, 12 insertions(+)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f69dc9467d71..33f3f4f5edc5 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,15 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
  };
  
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+   struct iommu_domain *domain)
+{
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+   struct iova_domain *iovad = &cookie->iovad;
+
+   free_cpu_cached_iovas(cpu, iovad);
+}
+
  static void iommu_dma_entry_dtor(unsigned long data)
  {
struct page *freelist = (struct page *)data;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 2112f21f73d8..316d22a4a860 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -37,6 +37,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
  
  void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
  
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+   struct iommu_domain *domain);
+
  #else /* CONFIG_IOMMU_DMA */
  
  struct iommu_domain;




I will add below in the next version:

diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 37df037788f0..ab4bffea3aaa 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -81,5 +81,10 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
 }
 
+static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+  struct iommu_domain *domain)
+{
+}
+
 #endif /* CONFIG_IOMMU_DMA */
 #endif /* __DMA_IOMMU_H */
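
For reference, a caller would look roughly like this (a sketch; the
hotplug callback and my_dev are hypothetical):

static int my_iommu_cpu_dead(unsigned int cpu)
{
	/* flush the dying CPU's cached IOVAs for the domain of my_dev */
	struct iommu_domain *domain = iommu_get_domain_for_dev(my_dev);

	if (domain)
		iommu_dma_free_cpu_cached_iovas(cpu, domain);
	return 0;
}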

The rest looks good to me.

Best regards,
baolu


Re: [PATCH V2 3/5] iommu: allow the dma-iommu api to use bounce buffers

2020-09-08 Thread Lu Baolu

On 9/4/20 4:18 AM, Tom Murphy wrote:

Allow the dma-iommu api to use bounce buffers for untrusted devices.
This is a copy of the intel bounce buffer code.

Signed-off-by: Tom Murphy 
---
  drivers/iommu/dma-iommu.c   | 94 ++---
  drivers/iommu/intel/iommu.c |  6 +++
  drivers/iommu/iommu.c   | 10 
  include/linux/iommu.h   |  7 +++
  4 files changed, 101 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 33f3f4f5edc5..185cd504ca5a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,9 +21,11 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
+#include 
  
  struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -498,26 +500,87 @@ static void __iommu_dma_unmap(struct device *dev, 
dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
  }
  
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+   size_t size, enum dma_data_direction dir,
+   unsigned long attrs)
+{
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+   struct iova_domain *iovad = &cookie->iovad;
+   size_t iova_off = iova_offset(iovad, dma_addr);
+   size_t aligned_size = iova_align(iovad, size + iova_off);
+   phys_addr_t phys;
+
+   phys = iommu_iova_to_phys(domain, dma_addr);
+   if (WARN_ON(!phys))
+   return;
+
+   __iommu_dma_unmap(dev, dma_addr, size);
+
+#ifdef CONFIG_SWIOTLB
+   if (unlikely(is_swiotlb_buffer(phys)))
+   swiotlb_tbl_unmap_single(dev, phys, size,
+   aligned_size, dir, attrs);
+#endif
+}
+
  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-   size_t size, int prot, u64 dma_mask)
+   size_t org_size, dma_addr_t dma_mask, bool coherent,
+   enum dma_data_direction dir, unsigned long attrs)
  {
+   int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
+   size_t aligned_size = iova_align(iovad, org_size + iova_off);
+   void *padding_start;
+   size_t padding_size;
dma_addr_t iova;
  
 	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
 		return DMA_MAPPING_ERROR;
 
-	size = iova_align(iovad, size + iova_off);
+#ifdef CONFIG_SWIOTLB
+   /*
+* If both the physical buffer start address and size are
+* page aligned, we don't need to use a bounce page.
+*/
+   if (iommu_needs_bounce_buffer(dev)
+   && !iova_offset(iovad, phys | org_size)) {
+   phys = swiotlb_tbl_map_single(dev,
+   __phys_to_dma(dev, io_tlb_start),
+   phys, org_size, aligned_size, dir, attrs);
+
+   if (phys == DMA_MAPPING_ERROR)
+   return DMA_MAPPING_ERROR;
+
+   /* Cleanup the padding area. */
+   padding_start = phys_to_virt(phys);
+   padding_size = aligned_size;
+
+   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+   (dir == DMA_TO_DEVICE ||
+dir == DMA_BIDIRECTIONAL)) {
+   padding_start += org_size;
+   padding_size -= org_size;
+   }
  
+   memset(padding_start, 0, padding_size);
+   }
+#endif
+
-	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+   iova = iommu_dma_alloc_iova(domain, aligned_size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

-	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
-   iommu_dma_free_iova(cookie, iova, size, NULL);
+   if (iommu_map_atomic(domain, iova, phys - iova_off, aligned_size,
+   prot)) {
+
+   if (unlikely(is_swiotlb_buffer(phys)))
+   swiotlb_tbl_unmap_single(dev, phys, aligned_size,
+   aligned_size, dir, attrs);
+   iommu_dma_free_iova(cookie, iova, aligned_size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -751,10 +814,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
  {
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
-   int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
  
-	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+   dma_handle = __iommu_dma_map(dev, phys, size, 

Re: [PATCH V2 5/5] DO NOT MERGE: iommu: disable list appending in dma-iommu

2020-09-08 Thread Lu Baolu

Hi Christoph,

On 9/8/20 2:23 PM, Christoph Hellwig wrote:

On Tue, Sep 08, 2020 at 02:04:53PM +0800, Lu Baolu wrote:

Do you mind telling me where I can find Marek's series?


[PATCH v10 00/30] DRM: fix struct sg_table nents vs. orig_nents misuse

on various lists including the iommu one.



It seems that more work is needed in the i915 driver. I will add the
below quirk as you suggested.

--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -851,6 +851,31 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;

+   /*
+    * The Intel graphics device driver assumes that the returned sg
+    * list is not combined. This blocks the efforts of converting the
+    * Intel IOMMU driver to the dma-iommu APIs. Add this quirk to make
+    * the device driver work; it should be removed once this is fixed
+    * in the i915 driver.
+    */
+   if (dev_is_pci(dev) &&
+   to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
+   (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+   for_each_sg(sg, s, nents, i) {
+   unsigned int s_iova_off = sg_dma_address(s);
+   unsigned int s_length = sg_dma_len(s);
+   unsigned int s_iova_len = s->length;
+
+   s->offset += s_iova_off;
+   s->length = s_length;
+   sg_dma_address(s) = dma_addr + s_iova_off;
+   sg_dma_len(s) = s_length;
+   dma_addr += s_iova_len;
+   }
+
+   return nents;
+   }
+

Best regards,
baolu


Re: [PATCH V2 4/5] iommu/vt-d: Convert intel iommu driver to the iommu ops

2020-09-08 Thread Lu Baolu

On 9/4/20 4:18 AM, Tom Murphy wrote:

+static int intel_iommu_needs_bounce_buffer(struct device *d)
+{
+   return !intel_no_bounce && dev_is_pci(d) && to_pci_dev(d)->untrusted;
+}
+
+
  static void intel_iommu_probe_finalize(struct device *dev)
  {
-   struct iommu_domain *domain;
+   dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
+   struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+   struct dmar_domain *dmar_domain = to_dmar_domain(domain);
  
-	domain = iommu_get_domain_for_dev(dev);
-   if (device_needs_bounce(dev))
-   set_dma_ops(dev, &bounce_dma_ops);
-   else if (domain && domain->type == IOMMU_DOMAIN_DMA)
-   set_dma_ops(dev, &intel_dma_ops);
+   if (intel_iommu_needs_bounce_buffer(dev) ||


For untrusted devices, the DMA type of domain is enforced. There's no
need to check again here.
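
I.e., the finalize hook could reduce to roughly this (a sketch):

static void intel_iommu_probe_finalize(struct device *dev)
{
	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);

	/* untrusted devices are already forced into a DMA domain */
	if (domain && domain->type == IOMMU_DOMAIN_DMA)
		iommu_setup_dma_ops(dev, base,
				__DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
	else
		set_dma_ops(dev, NULL);
}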

Best regards,
baolu


+   (domain && domain->type == IOMMU_DOMAIN_DMA))
+   iommu_setup_dma_ops(dev, base,
+   __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
else
set_dma_ops(dev, NULL);
  }
  



[PATCH] iommu/dma: Fix IOVA reserve dma ranges

2020-09-08 Thread Srinath Mannam via iommu
Fix IOVA reserve failure for memory regions listed in dma-ranges in the
following cases.

- start address of memory region is 0x0.
- end address of a memory region is equal to start address of next memory
  region.
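
To make the two cases concrete (hypothetical bridge windows):

/*
 * dma-ranges: [0x00000000 - 0x3fffffff] and [0x40000000 - 0x7fffffff]
 *
 * First window:  end = 0x0, start = 0x0 -> end == start; there is
 *                nothing below 0x0 to reserve, not a sort error.
 * Second window: start has advanced to 0x40000000 and end = 0x40000000
 *                -> end == start again; the windows are contiguous, so
 *                there is no gap to reserve between them.
 */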

Fixes: aadad097cd46f ("iommu/dma: Reserve IOVA for PCIe inaccessible DMA address")
Signed-off-by: Srinath Mannam 
---
 drivers/iommu/dma-iommu.c | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5141d49a046b..0a3f67a4f9ae 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -213,14 +213,21 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
 resv_iova:
+   if (end < start) {
+   /* dma_ranges list should be sorted */
+   dev_err(&dev->dev, "Failed to reserve IOVA\n");
+   return -EINVAL;
+   }
+   /*
+    * Skip the cases where the start address of the first memory region
+    * is 0x0, or where the end address of one memory region equals the
+    * start address of the next. Reserve IOVA for the remaining address
+    * ranges that fall between the given memory regions.
+    */
if (end > start) {
lo = iova_pfn(iovad, start);
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
-   } else {
-   /* dma_ranges list should be sorted */
-   dev_err(&dev->dev, "Failed to reserve IOVA\n");
-   return -EINVAL;
}
 
start = window->res->end - window->offset + 1;
-- 
2.17.1



Re: [PATCH v11 07/11] device-mapping: Introduce DMA range map, supplanting dma_pfn_offset

2020-09-08 Thread Nathan Chancellor
On Tue, Sep 08, 2020 at 11:43:45AM +0200, Christoph Hellwig wrote:
> And because I like replying to myself so much, here is a link to the
> version with the arm cleanup patch applied.  Unlike the previous two
> attempts this has at least survived very basic sanity testing:
> 
> http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/dma-ranges.2

Tested-by: Nathan Chancellor 

Thank you for fixing this up quickly!