Re: [PATCH 3/4] iommu: remove the put_resv_regions method

2022-07-09 Thread Christoph Hellwig
On Fri, Jul 08, 2022 at 05:00:59PM +0800, Baolu Lu wrote:
> Do we really need to export this symbol? It is not used beyond the iommu
> core code.

virtio-iommu calls it and can be modular.
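For the record, that is why the core has to keep exporting it; a
minimal sketch, not the exact in-tree code:

	/* drivers/iommu/iommu.c - built-in; virtio-iommu may be a module */
	void iommu_put_resv_regions(struct device *dev, struct list_head *list)
	{
		struct iommu_resv_region *entry, *next;

		list_for_each_entry_safe(entry, next, list, list)
			kfree(entry);
	}
	EXPORT_SYMBOL(iommu_put_resv_regions);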


Re: [PATCH 3/4] iommu: remove the put_resv_regions method

2022-07-09 Thread Christoph Hellwig
On Fri, Jul 08, 2022 at 11:12:45AM +0100, Will Deacon wrote:
> Heads up, but I think this might collide (trivially?) with:
> 
> https://lore.kernel.org/r/20220615101044.1972-1-shameerali.kolothum.th...@huawei.com
> 
> which Joerg has queued up already. It looks like the cleanup still makes
> sense though, so that's good.

This series sits on top of that one - I waited for it to hit the IOMMU
tree before resending to avoid the conflict.


[PATCH 4/4] iommu/arm-smmu-v3: cleanup arm_smmu_dev_{enable, disable}_feature

2022-07-09 Thread Christoph Hellwig
Fold arm_smmu_dev_has_feature and arm_smmu_dev_feature_enabled into
the main methods.

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 55 ++---
 1 file changed, 14 insertions(+), 41 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 4a5e435567f17..d32b02336411d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2765,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
 }
 
-static bool arm_smmu_dev_has_feature(struct device *dev,
-enum iommu_dev_features feat)
-{
-   struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
-   if (!master)
-   return false;
-
-   switch (feat) {
-   case IOMMU_DEV_FEAT_IOPF:
-   return arm_smmu_master_iopf_supported(master);
-   case IOMMU_DEV_FEAT_SVA:
-   return arm_smmu_master_sva_supported(master);
-   default:
-   return false;
-   }
-}
-
-static bool arm_smmu_dev_feature_enabled(struct device *dev,
-enum iommu_dev_features feat)
-{
-   struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
-   if (!master)
-   return false;
-
-   switch (feat) {
-   case IOMMU_DEV_FEAT_IOPF:
-   return master->iopf_enabled;
-   case IOMMU_DEV_FEAT_SVA:
-   return arm_smmu_master_sva_enabled(master);
-   default:
-   return false;
-   }
-}
-
 static int arm_smmu_dev_enable_feature(struct device *dev,
   enum iommu_dev_features feat)
 {
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
-   if (!arm_smmu_dev_has_feature(dev, feat))
+   if (!master)
return -ENODEV;
 
-   if (arm_smmu_dev_feature_enabled(dev, feat))
-   return -EBUSY;
-
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+   if (!arm_smmu_master_iopf_supported(master))
+   return -EINVAL;
+   if (master->iopf_enabled)
+   return -EBUSY;
master->iopf_enabled = true;
return 0;
case IOMMU_DEV_FEAT_SVA:
+   if (!arm_smmu_master_sva_supported(master))
+   return -EINVAL;
+   if (arm_smmu_master_sva_enabled(master))
+   return -EBUSY;
return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
@@ -2828,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
 {
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
-   if (!arm_smmu_dev_feature_enabled(dev, feat))
+   if (!master)
return -EINVAL;
 
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+   if (!master->iopf_enabled)
+   return -EINVAL;
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
case IOMMU_DEV_FEAT_SVA:
+   if (!arm_smmu_master_sva_enabled(master))
+   return -EINVAL;
return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
-- 
2.30.2



Re: [PATCH V3] swiotlb: Split up single swiotlb lock

2022-07-09 Thread Christoph Hellwig
On Thu, Jul 07, 2022 at 04:24:36AM -0400, Tianyu Lan wrote:
> From: Tianyu Lan 
> 
> Traditionally swiotlb was not performance critical because it was only
> used for slow devices. But in some setups, like TDX/SEV confidential
> guests, all IO has to go through swiotlb. Currently swiotlb only has a
> single lock. Under high IO load with multiple CPUs this can lead to
> significant lock contention on the swiotlb lock.
> 
> This patch splits the swiotlb bounce buffer pool into individual areas
> which have their own lock. Each CPU tries to allocate in its own area
> first. Only if that fails does it search other areas. On freeing the
> allocation is freed into the area where the memory was originally
> allocated from.
> 
> The number of areas can be set via the swiotlb kernel parameter and
> defaults to the number of possible CPUs. If the possible CPU number
> is not a power of 2, the area number is rounded up to the next power of 2.
> 
> This idea comes from Andi Kleen's patch (https://github.com/intel/tdx/commit/
> 4529b5784c141782c72ec9bd9a92df2b68cb7d45).

Thanks, this looks much better.  I think there is a small problem
with how default_nareas is set - we need to use 0 as the default
so that an explicit command line value of 1 works.  Also, have you
checked the interaction with swiotlb_adjust_size in detail?

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5536d2cd69d30..85b1c29dd0eb8 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -70,7 +70,7 @@ struct io_tlb_mem io_tlb_default_mem;
 phys_addr_t swiotlb_unencrypted_base;
 
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
-static unsigned long default_nareas = 1;
+static unsigned long default_nareas;
 
 /**
  * struct io_tlb_area - IO TLB memory area descriptor
@@ -90,7 +90,10 @@ struct io_tlb_area {
 
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-   if (!is_power_of_2(nareas))
+   if (default_nareas)
+   return;
+
+   if (nareas > 1 && !is_power_of_2(nareas))
nareas = roundup_pow_of_two(nareas);
 
default_nareas = nareas;
@@ -338,8 +341,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
  __func__, alloc_size, PAGE_SIZE);
 
-   if (default_nareas == 1)
-   swiotlb_adjust_nareas(num_possible_cpus());
+   swiotlb_adjust_nareas(num_possible_cpus());
 
mem->areas = memblock_alloc(sizeof(struct io_tlb_area) *
default_nareas, SMP_CACHE_BYTES);
@@ -410,8 +412,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
(PAGE_SIZE << order) >> 20);
}
 
-   if (default_nareas == 1)
-   swiotlb_adjust_nareas(num_possible_cpus());
+   swiotlb_adjust_nareas(num_possible_cpus());
 
area_order = get_order(array_size(sizeof(*mem->areas),
default_nareas));
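For reference, the allocation policy the commit message describes
boils down to something like this (simplified sketch, not the actual
patch code; area_has_free_slots() is a made-up helper):

	static int swiotlb_pick_area(unsigned int nareas)
	{
		/* nareas is a power of two, so the home area is a cheap mask */
		unsigned int start = raw_smp_processor_id() & (nareas - 1);
		unsigned int i;

		for (i = 0; i < nareas; i++) {
			unsigned int a = (start + i) & (nareas - 1);

			if (area_has_free_slots(a))
				return a;
		}
		return -1;	/* every area is exhausted */
	}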


[PATCH 1/4] iommu: remove the unused dev_has_feat method

2022-07-09 Thread Christoph Hellwig
This method is never actually called.

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 1 -
 include/linux/iommu.h   | 4 +---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d9c1623ec1a9a..1b6c17dd81ee4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2853,7 +2853,6 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate   = arm_smmu_of_xlate,
.get_resv_regions   = arm_smmu_get_resv_regions,
.put_resv_regions   = generic_iommu_put_resv_regions,
-   .dev_has_feat   = arm_smmu_dev_has_feature,
.dev_feat_enabled   = arm_smmu_dev_feature_enabled,
.dev_enable_feat= arm_smmu_dev_enable_feature,
.dev_disable_feat   = arm_smmu_dev_disable_feature,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e6abd998dbe73..a3acdb46b9391 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -164,8 +164,7 @@ struct iommu_iort_rmr_data {
  *  supported, this feature must be enabled before and
  *  disabled after %IOMMU_DEV_FEAT_SVA.
  *
- * Device drivers query whether a feature is supported using
- * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature().
+ * Device drivers enable a feature using iommu_dev_enable_feature().
  */
 enum iommu_dev_features {
IOMMU_DEV_FEAT_SVA,
@@ -248,7 +247,6 @@ struct iommu_ops {
bool (*is_attach_deferred)(struct device *dev);
 
/* Per device IOMMU features */
-   bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
-- 
2.30.2



[PATCH 3/4] iommu: remove the put_resv_regions method

2022-07-09 Thread Christoph Hellwig
All drivers that implement get_resv_regions just use
generic_put_resv_regions to implement the put side.  Remove the
indirection and document the allocation constraints.

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/amd/iommu.c   |  1 -
 drivers/iommu/apple-dart.c  |  1 -
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  1 -
 drivers/iommu/arm/arm-smmu/arm-smmu.c   |  1 -
 drivers/iommu/intel/iommu.c |  1 -
 drivers/iommu/iommu.c   | 21 -
 drivers/iommu/mtk_iommu.c   |  1 -
 drivers/iommu/virtio-iommu.c|  5 ++---
 include/linux/iommu.h   |  4 
 9 files changed, 6 insertions(+), 30 deletions(-)

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad9..e66e071e8c3b9 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2280,7 +2280,6 @@ const struct iommu_ops amd_iommu_ops = {
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
-   .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap  = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e87d3cf54ed68..1b17257592626 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -768,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
-   .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 4d30a8d2bc236..4a5e435567f17 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2852,7 +2852,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group   = arm_smmu_device_group,
.of_xlate   = arm_smmu_of_xlate,
.get_resv_regions   = arm_smmu_get_resv_regions,
-   .put_resv_regions   = generic_iommu_put_resv_regions,
.dev_enable_feat= arm_smmu_dev_enable_feature,
.dev_disable_feat   = arm_smmu_dev_disable_feature,
.sva_bind   = arm_smmu_sva_bind,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 588929bed1bc3..2d4129a4ccfc0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1584,7 +1584,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group   = arm_smmu_device_group,
.of_xlate   = arm_smmu_of_xlate,
.get_resv_regions   = arm_smmu_get_resv_regions,
-   .put_resv_regions   = generic_iommu_put_resv_regions,
.def_domain_type= arm_smmu_def_domain_type,
.pgsize_bitmap  = -1UL, /* Restricted during device attach */
.owner  = THIS_MODULE,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 44016594831de..49d616aa21489 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4911,7 +4911,6 @@ const struct iommu_ops intel_iommu_ops = {
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions   = intel_iommu_get_resv_regions,
-   .put_resv_regions   = generic_iommu_put_resv_regions,
.device_group   = intel_iommu_device_group,
.dev_enable_feat= intel_iommu_dev_enable_feat,
.dev_disable_feat   = intel_iommu_dev_disable_feat,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 1bb016a6a2aa1..f53f8b2d27a54 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2579,27 +2579,14 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_resv_regions(struct device *dev, struct list_head *list)
-{
-   const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-   if (ops->put_resv_regions)
-   ops->put_resv_regions(dev, list);
-}
-
 /**
- * generic_iommu_put_resv_regions - Reserved region driver helper
+ * iommu_put_resv_regions - release reserved regions
  * @dev: device for which to free reserved regions
  * @list: reserved region list for device
  *
- * IOMMU drivers can use this to implement their .put_resv_regions() callback
- * for simple reservations. If

Re: fully convert arm to use dma-direct v3

2022-07-09 Thread Christoph Hellwig
On Thu, Jul 07, 2022 at 10:09:27AM +0200, Greg Kroah-Hartman wrote:
> > Anyone who has real concerns, please scream now.
> 
> Sounds like a good plan to me, pull it in and let's see if anyone even
> notices.

Ok, I've added the series to the dma-mapping tree now.


[PATCH 2/4] iommu: remove iommu_dev_feature_enabled

2022-07-09 Thread Christoph Hellwig
Remove the unused iommu_dev_feature_enabled function.

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  1 -
 drivers/iommu/iommu.c   | 13 -
 include/linux/iommu.h   |  9 -
 3 files changed, 23 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1b6c17dd81ee4..4d30a8d2bc236 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2853,7 +2853,6 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate   = arm_smmu_of_xlate,
.get_resv_regions   = arm_smmu_get_resv_regions,
.put_resv_regions   = generic_iommu_put_resv_regions,
-   .dev_feat_enabled   = arm_smmu_dev_feature_enabled,
.dev_enable_feat= arm_smmu_dev_enable_feature,
.dev_disable_feat   = arm_smmu_dev_disable_feature,
.sva_bind   = arm_smmu_sva_bind,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0aa141646bdf4..1bb016a6a2aa1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2760,19 +2760,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
 }
 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
 
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
-   if (dev->iommu && dev->iommu->iommu_dev) {
-   const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
-
-   if (ops->dev_feat_enabled)
-   return ops->dev_feat_enabled(dev, feat);
-   }
-
-   return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-
 /**
  * iommu_sva_bind_device() - Bind a process address space to a device
  * @dev: the device
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a3acdb46b9391..0bc2eb14b0262 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -215,7 +215,6 @@ struct iommu_iotlb_gather {
  *  driver init to device driver init (default no)
  * @dev_has/enable/disable_feat: per device entries to check/enable/disable
  *   iommu specific features.
- * @dev_feat_enabled: check enabled feature
  * @sva_bind: Bind process address space to device
  * @sva_unbind: Unbind process address space from device
  * @sva_get_pasid: Get PASID associated to a SVA handle
@@ -247,7 +246,6 @@ struct iommu_ops {
bool (*is_attach_deferred)(struct device *dev);
 
/* Per device IOMMU features */
-   bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
 
@@ -670,7 +668,6 @@ void iommu_release_device(struct device *dev);
 
 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
 
 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
@@ -997,12 +994,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return NULL;
 }
 
-static inline bool
-iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
-   return false;
-}
-
 static inline int
 iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
 {
-- 
2.30.2



remove dead iommu code v2

2022-07-09 Thread Christoph Hellwig
Hi all,

this removes a bit of dead code and methods from the iommu code and
then cleans up the arm-smmu-v3 driver a little bit based on that.

Changes since v1:
 - rebased to the latest iommu/core branch
 - don't accidentally change an error code in arm_smmu_dev_enable_feature
 - add a kerneldoc comment to iommu_put_resv_regions

Diffstat:
 drivers/iommu/amd/iommu.c   |1 
 drivers/iommu/apple-dart.c  |1 
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |   58 ++--
 drivers/iommu/arm/arm-smmu/arm-smmu.c   |1 
 drivers/iommu/intel/iommu.c |1 
 drivers/iommu/iommu.c   |   34 +---
 drivers/iommu/mtk_iommu.c   |1 
 drivers/iommu/virtio-iommu.c|5 --
 include/linux/iommu.h   |   17 
 9 files changed, 21 insertions(+), 98 deletions(-)


Re: [RFC PATCH 2/2] dma-direct: Fix dma_direct_{alloc,free}() for Hyper-V IVMs

2022-07-06 Thread Christoph Hellwig
On Wed, Jul 06, 2022 at 09:50:27PM +0200, Andrea Parri (Microsoft) wrote:
> @@ -305,6 +306,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>   ret = page_address(page);
>   if (dma_set_decrypted(dev, ret, size))
>   goto out_free_pages;
> +#ifdef CONFIG_HAS_IOMEM
> + /*
> +  * Remap the pages in the unencrypted physical address space
> +  * when dma_unencrypted_base is set (e.g., for Hyper-V AMD
> +  * SEV-SNP isolated guests).
> +  */
> + if (dma_unencrypted_base) {
> + phys_addr_t ret_pa = virt_to_phys(ret);
> +
> + ret_pa += dma_unencrypted_base;
> + ret = memremap(ret_pa, size, MEMREMAP_WB);
> + if (!ret)
> + goto out_encrypt_pages;
> + }
> +#endif


So:

this needs to move into dma_set_decrypted, otherwise we don't handle
the dma_alloc_pages case (never mind that this is pretty unreadable).

Which then again largely duplicates the code in swiotlb.  So I think
what we need here is a low-level helper that does the
set_memory_decrypted and memremap.  I'm not quite sure where it
should go, but maybe some of the people involved with memory
encryption might have good ideas.  unencrypted_base should go with
it and then both swiotlb and dma-direct can call it.
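Something along these lines, maybe (untested sketch, the helper name
is made up):

	static void *dma_decrypt_and_remap(void *vaddr, size_t size)
	{
		phys_addr_t pa;

		if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(size)))
			return NULL;
		if (!dma_unencrypted_base)
			return vaddr;

		/* remap through the unencrypted alias, e.g. for SEV-SNP */
		pa = virt_to_phys(vaddr) + dma_unencrypted_base;
		return memremap(pa, size, MEMREMAP_WB);
	}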

> + /*
> +  * If dma_unencrypted_base is set, the virtual address returned by
> +  * dma_direct_alloc() is in the vmalloc address range.
> +  */
> + if (!dma_unencrypted_base && is_vmalloc_addr(cpu_addr)) {
>   vunmap(cpu_addr);
>   } else {
>   if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
>   arch_dma_clear_uncached(cpu_addr, size);
> +#ifdef CONFIG_HAS_IOMEM
> + if (dma_unencrypted_base) {
> + memunmap(cpu_addr);
> + /* re-encrypt the pages using the original address */
> + cpu_addr = page_address(pfn_to_page(PHYS_PFN(
> + dma_to_phys(dev, dma_addr))));
> + }
> +#endif
>   if (dma_set_encrypted(dev, cpu_addr, size))

Same on the unmap side.  It might also be worth looking into reordering
the checks in some form instead of that raw dma_unencrypted_base check
before the unmap.


Re: fully convert arm to use dma-direct v3

2022-07-06 Thread Christoph Hellwig
On Wed, Jun 29, 2022 at 08:41:32AM +0200, Greg Kroah-Hartman wrote:
> On Wed, Jun 29, 2022 at 08:28:37AM +0200, Christoph Hellwig wrote:
> > Any comments or additional testing?  It would be really great to get
> > this off the table.
> 
> For the USB bits:
> 
> Acked-by: Greg Kroah-Hartman 

So given that we're not making any progress on getting anyone interested
in the series, I'm tempted to just pull it into the dma-mapping tree
this weekend so that we'll finally have all architectures using the
common code.

Anyone who has real concerns, please scream now.


Re: [PATCH v5 0/5] DMA mapping changes for SCSI core

2022-07-06 Thread Christoph Hellwig
On Wed, Jul 06, 2022 at 02:40:44PM +0100, John Garry wrote:
> On 30/06/2022 13:08, John Garry wrote:
>
> Hi Christoph,
>
> Can you please consider picking up this series? A few things to note 
> beforehand:
>
> - I changed to only apply the mapping limit to SAS hosts in this version. I 
> would need a fresh ack from Martin for those SCSI parts, but wanted to make 
> sure you were ok with it.

Yes, I've mostly been waiting for an ACK from Martin.

> - Damien had some doubt on updating the shost max_sectors as opposed to the 
> per-request queue default, but I think he's ok with it - see patch 4/5

I'm fine either way.


Re: [PATCH 2/2] x86/ACPI: Set swiotlb area according to the number of lapic entry in MADT

2022-07-06 Thread Christoph Hellwig
On Wed, Jul 06, 2022 at 04:57:33PM +0800, Tianyu Lan wrote:
> Swiotlb_init() is called in the mem_init() of different architects and
> memblock free pages are released to the buddy allocator just after
> calling swiotlb_init() via memblock_free_all().

Yes.

> The mem_init() is called before smp_init().

But why would that matter?  cpu_possible_map is set up from
setup_arch(), which is called before that.
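Roughly (simplified sketch of the init ordering, details differ per
architecture):

	start_kernel()
	  setup_arch()          /* MADT parsed, cpu_possible_map filled */
	  ...
	  mm_init()
	    mem_init()
	      swiotlb_init()    /* num_possible_cpus() is already valid */
	  ...
	  /* smp_init() only runs much later, from kernel_init() */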


Re: [PATCH 2/2] x86/ACPI: Set swiotlb area according to the number of lapic entry in MADT

2022-07-06 Thread Christoph Hellwig
On Fri, Jul 01, 2022 at 01:02:21AM +0800, Tianyu Lan wrote:
> > Can we reorder that initialization?  Because I really hate having
> > to have an arch hook in every architecture.
> 
> How about using "flags" parameter of swiotlb_init() to pass area number
> or add new parameter for area number?
> 
> I just reposted patch 1 since there is just some coding style issue and area
> number may also set via swiotlb kernel parameter. We still need figure out a
> good solution to pass area number from architecture code.

What is the problem with calling swiotlb_init after nr_possible_cpus()
works?


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
On Tue, Jul 05, 2022 at 12:16:45PM -0600, Logan Gunthorpe wrote:
> The current version does it through a char device, but that requires
> creating a simple_fs and anon_inode for teardown on driver removal, plus
> a bunch of hooks through the driver that exposes it (NVMe, in this case)
> to set this all up.
> 
> Christoph is suggesting a sysfs interface which could potentially avoid
> the anon_inode and all of the extra hooks. It has some significant
> benefits and maybe some small downsides, but I wouldn't describe it as
> horrid.

Yeah, I don't think it is horrible, it fits in with the resource files
for the BARs, and solves a lot of problems.  Greg, can you explain
what would be so bad about it?


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
[note for the newcomers, this is about allowing mmap()ing the PCIe
P2P memory from the generic PCI P2P code through sysfs, and more
importantly how to revoke it on device removal]

On Tue, Jul 05, 2022 at 10:44:49AM -0600, Logan Gunthorpe wrote:
> We might be able to. I'm not sure. I'll have to figure out how to find
> that inode from the p2pdma code. I haven't found an obvious interface to
> do that.

I think the right way to approach this would be a new sysfs API
that internally calls unmap_mapping_range instead of
exposing the inode. I suspect that might actually be the right thing
to do for iomem_inode as well.
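I.e. something like (untested, the function name is made up):

	static void p2pdma_sysfs_revoke(struct address_space *mapping)
	{
		/* zap every VMA created through this file's mapping */
		unmap_mapping_range(mapping, 0, 0, 1);
	}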


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
On Tue, Jul 05, 2022 at 10:41:52AM -0600, Logan Gunthorpe wrote:
> Using sysfs means we don't need all the messy callbacks from the nvme
> driver, which is a plus. But I'm not sure how we'd get or unmap the
> mapping of a sysfs file or avoid the anonymous inode. Seems with the
> existing PCI resources, it uses an bin_attribute->f_mapping() callback
> to pass back the iomem_get_mapping() mapping on file open.
> revoke_iomem() is then used to nuke the VMAs. I don't think we can use
> the same infrastructure here as that would add a dependency on
> CONFIG_IO_STRICT_DEVMEM; which would be odd. And I'm not sure whether
> there is a better way.

Why can't we do the revoke on the actual sysfs inode?


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
On Tue, Jul 05, 2022 at 01:29:59PM -0300, Jason Gunthorpe wrote:
> > Making the entire area given by the device to the p2p allocator available
> > to user space seems sensible to me.  That is what the current series does,
> > and what a sysfs interface would do as well.
> 
> That makes openning the mmap exclusive with the in-kernel allocator -
> so it means opening the mmap fails if something else is using a P2P
> page and once the mmap is open all kernel side P2P allocations will
> fail?

No.  Just as in the current patchset you can mmap the file and will get
len / PAGE_SIZE pages from the per-device p2pdma pool, or the mmap will
fail if none are available.  A kernel consumer (or multiple) can use
other pages in the pool at the same time.
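From the user space side that would look like (illustrative only, the
device node path is made up):

	int fd = open("/dev/p2pmem0", O_RDWR);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		err(1, "no free pages left in the per-device pool");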


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
On Tue, Jul 05, 2022 at 10:51:02AM -0300, Jason Gunthorpe wrote:
> > In fact I'm not even sure this should be a character device, it seems
> > to fit in way better with the PCI sysfs hierarchy, just like how we
> > map MMIO resources, which these are anyway.  And once it is on sysfs
> > we do have a unique inode and need none of the pseudofs stuff, and
> > don't need all the glue code in nvme either.
> 
> Shouldn't there be an allocator here? It feels a bit weird that the
> entire CMB is given to a single process, it is a sharable resource,
> isn't it?

Making the entire area given by the device to the p2p allocator available
to user space seems sensible to me.  That is what the current series does,
and what a sysfs interface would do as well.


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-07-05 Thread Christoph Hellwig
On Wed, Jun 29, 2022 at 02:59:06PM -0300, Jason Gunthorpe wrote:
> I've tried in the past, this is not a good idea. There is no way to
> handle failures when a VMA is dup'd and if you rely on private_data
> you almost certainly have to alloc here.
> 
> Then there is the issue of making the locking work on invalidation
> which is crazy ugly.
> 
> > I was not a fan of the extra code for this either, but I was given to
> > understand that it was the standard way to collect and cleanup VMAs.
> 
> Christoph you tried to clean it up once globally, what happened to
> that?

Al pointed out that there are various places that rely on having a
separate file system.  I might be able to go back to it and see
if we could at least do it for some users.

But what also really matters here:  I don't want every user that
wants to be able to mmap a character device to do all this work.
The layering is simply wrong, it needs some character device
based helpers, not be open code everywhere.

In fact I'm not even sure this should be a character device, it seems
to fit in way better with the PCI sysfs hierarchy, just like how we
map MMIO resources, which these are anyway.  And once it is on sysfs
we do have a unique inode and need none of the pseudofs stuff, and
don't need all the glue code in nvme either.


Re: [PATCH v7 16/21] block: add check when merging zone device pages

2022-07-03 Thread Christoph Hellwig
On Thu, Jun 30, 2022 at 03:50:10PM -0600, Logan Gunthorpe wrote:
> Oh, it turns out this code has nothing to do with REQ_NOMERGE. It's used
> indirectly in bio_map_user_iov() and __bio_iov_iter_get_pages() when
> adding pages to the bio via page_is_mergeable(). So it's not about
> requests being merged it's about pages being merged.

Oh, true.

> So I'm not sure how we can avoid this, but it only happens when two
> adjacent pages are added to the same bio in a row, so I don't think it's
> that common, but the check can probably be moved down so it happens
> after the same_page check to make it a little less common.

Yes, looks like we have to keep it.


Re: [PATCH -next] dma-mapping: Fix build error unused-value

2022-06-30 Thread Christoph Hellwig
Thanks, this looks good with a minor nit below:

Reviewed-by: Christoph Hellwig 

Mathieu, can you pick this up through your tree as that is where the
offending commit was merged through?

> Fixes: e61c451476e6("dma-mapping: Add dma_release_coherent_memory to DMA API")

missing space before the opening brace here.
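I.e. it should read:

Fixes: e61c451476e6 ("dma-mapping: Add dma_release_coherent_memory to DMA API")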


Re: [PATCH 1/2] swiotlb: Split up single swiotlb lock

2022-06-29 Thread Christoph Hellwig
On Mon, Jun 27, 2022 at 11:31:49AM -0400, Tianyu Lan wrote:
> +/**
> + * struct io_tlb_area - IO TLB memory area descriptor
> + *
> + * This is a single area with a single lock.
> + *
> + * @used:    The number of used IO TLB blocks.
> + * @index:   The slot index to start searching in this area for next round.
> + * @lock:    The lock to protect the above data structures in the map and
> + *   unmap calls.
> + */
> +struct io_tlb_area {
> + unsigned long used;
> + unsigned int index;
> + spinlock_t lock;
> +};

As already mentioned last time, please move this into swiotlb.c,
swiotlb.h only uses a pointer to this structure.

>  static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> - unsigned long nslabs, unsigned int flags, bool late_alloc)
> + unsigned long nslabs, unsigned int flags,
> + bool late_alloc, unsigned int nareas)

Nit: the two tab indentation for prototype continuations is a lot easier
to maintain, so don't gratuitously switch away from it.

> + alloc_size - (offset + ((i - slot_index) << IO_TLB_SHIFT));

Overly long line here.



Re: [PATCH 2/2] x86/ACPI: Set swiotlb area according to the number of lapic entry in MADT

2022-06-29 Thread Christoph Hellwig
On Mon, Jun 27, 2022 at 11:31:50AM -0400, Tianyu Lan wrote:
> From: Tianyu Lan 
> 
> When the swiotlb bounce buffer is initialized, smp_init() has not been
> called yet and the CPU number cannot be obtained from num_online_cpus().
> Use the number of lapic entries to set the swiotlb area number and
> keep the swiotlb area number equal to the CPU number on the x86 platform.

Can we reorder that initialization?  Because I really hate having
to have an arch hook in every architecture.


Re: [PATCH v7 00/21] Userspace P2PDMA with O_DIRECT NVMe devices

2022-06-28 Thread Christoph Hellwig
Given how long this has been stuck, and how big it is, touching many
subsystems, can we start to make progress on this piecemeal?

I think patches 1-13 look pretty solid, and assuming a review for the
dma-iommu bits these patches could probably be queued up ASAP.


Re: [PATCH v7 20/21] PCI/P2PDMA: Introduce pci_mmap_p2pmem()

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:32AM -0600, Logan Gunthorpe wrote:
> A pseudo mount is used to allocate an inode for each PCI device. The
> inode's address_space is used in the file doing the mmap so that all
> VMAs are collected and can be unmapped if the PCI device is unbound.
> After unmapping, the VMAs are iterated through and their pages are
> put so the device can continue to be unbound. An active flag is used
> to signal to VMAs not to allocate any further P2P memory once the
> removal process starts. The flag is synchronized with concurrent
> access with an RCU lock.

Can't we come up with a way of doing this without all the pseudo-fs
garbagage?  I really hate all the overhead for that in the next
nvme patch as well.


Re: [PATCH v7 16/21] block: add check when merging zone device pages

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:28AM -0600, Logan Gunthorpe wrote:
> Consecutive zone device pages should not be merged into the same sgl
> or bvec segment with other types of pages or if they belong to different
> pgmaps. Otherwise getting the pgmap of a given segment is not possible
> without scanning the entire segment. This helper returns true either if
> both pages are not zone device pages or both pages are zone device
> pages with the same pgmap.
> 
> Add a helper to determine if zone device pages are mergeable and use
> this helper in page_is_mergeable().

Any reason not to simply set REQ_NOMERGE for these requests?  We
can't merge for passthrough requests anyway, and generally don't merge
for direct I/O either, so adding all this overhead seems a bit pointless.


Re: [PATCH v7 15/21] iov_iter: introduce iov_iter_get_pages_[alloc_]flags()

2022-06-28 Thread Christoph Hellwig
I think this is going to have massive conflicts with Al's iov_iter
support..


Re: [PATCH v7 14/21] mm: introduce FOLL_PCI_P2PDMA to gate getting PCI P2PDMA pages

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:26AM -0600, Logan Gunthorpe wrote:
> GUP Callers that expect PCI P2PDMA pages can now set FOLL_PCI_P2PDMA to
> allow obtaining P2PDMA pages. If GUP is called without the flag and a
> P2PDMA page is found, it will return an error.
> 
> FOLL_PCI_P2PDMA cannot be set if FOLL_LONGTERM is set.

Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 13/21] PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg()

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 12/21] RDMA/rw: drop pci_p2pdma_[un]map_sg()

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:24AM -0600, Logan Gunthorpe wrote:
> dma_map_sg() now supports the use of P2PDMA pages so pci_p2pdma_map_sg()
> is no longer necessary and may be dropped. This means the
> rdma_rw_[un]map_sg() helpers are no longer necessary. Remove it all.

Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 11/21] RDMA/core: introduce ib_dma_pci_p2p_dma_supported()

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:23AM -0600, Logan Gunthorpe wrote:
> Introduce the helper function ib_dma_pci_p2p_dma_supported() to check
> if a given ib_device can be used in P2PDMA transfers. This ensures
> the ib_device is not using virt_dma and also that the underlying
> dma_device supports P2PDMA.
> 
> Use the new helper in nvme-rdma to replace the existing check for
> ib_uses_virt_dma(). Adding the dma_pci_p2pdma_supported() check allows
> switching away from pci_p2pdma_[un]map_sg().
> 
> Signed-off-by: Logan Gunthorpe 
> Reviewed-by: Jason Gunthorpe 
> Reviewed-by: Max Gurtovoy 

Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 10/21] nvme-pci: convert to using dma_map_sgtable()

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 09/21] nvme-pci: check DMA ops when indicating support for PCI P2PDMA

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 07/21] dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 06/21] dma-direct: support PCI P2PDMA pages in dma-direct map_sg

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 05/21] dma-mapping: allow EREMOTEIO return code for P2PDMA transfers

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 04/21] PCI/P2PDMA: Introduce helpers for dma_map_sg implementations

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 03/21] PCI/P2PDMA: Expose pci_p2pdma_map_type()

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 02/21] PCI/P2PDMA: Attempt to set map_type if it has not been set

2022-06-28 Thread Christoph Hellwig
Looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH v7 01/21] lib/scatterlist: add flag for indicating P2PDMA segments in an SGL

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 10:12:13AM -0600, Logan Gunthorpe wrote:
> Make use of the third free LSB in scatterlist's page_link on 64bit systems.
> 
> The extra bit will be used by dma_[un]map_sg_p2pdma() to determine when a
> given SGL segments dma_address points to a PCI bus address.
> dma_unmap_sg_p2pdma() will need to perform different cleanup when a
> segment is marked as a bus address.
> 
> The new bit will only be used when CONFIG_PCI_P2PDMA is set; this means
> PCI P2PDMA will require CONFIG_64BIT. This should be acceptable as the
> majority of P2PDMA use cases are restricted to newer root complexes and
> roughly require the extra address space for memory BARs used in the
> transactions.
> 
> Signed-off-by: Logan Gunthorpe 
> Reviewed-by: Chaitanya Kulkarni 

Looks good:

Reviewed-by: Christoph Hellwig 
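For the record, the trick is roughly this (illustrative, not the exact
macros from the patch): struct page is at least 8-byte aligned on
64-bit, and bits 0 and 1 of page_link are already taken by SG_CHAIN and
SG_END, which leaves bit 2 free:

	#define SG_CHAIN		0x01UL
	#define SG_END			0x02UL
	#define SG_DMA_BUS_ADDRESS	0x04UL	/* the new 64-bit-only flag */

	static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
	{
		return sg->page_link & SG_DMA_BUS_ADDRESS;
	}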


Re: fully convert arm to use dma-direct v3

2022-06-28 Thread Christoph Hellwig
Any comments or additional testing?  It would be really great to get
this off the table.

On Tue, Jun 14, 2022 at 11:20:37AM +0200, Christoph Hellwig wrote:
> Hi all,
> 
> arm is the last platform not using the dma-direct code for directly
> mapped DMA.  With the dmabounce removal from Arnd we can easily switch
> arm to always use dma-direct now (it already does for LPAE configs
> and nommu).  I'd love to merge this series through the dma-mapping tree
> as it gives us the opportunity for additional core dma-mapping
> improvements.
> 
> Changes since v2:
>  - rebased to Linux 5.19-rc2
> 
> Changes since v1:
>  - remove another unused function
>  - improve a few commit logs
>  - add three more patches from Robin
> 
> Diffstat:
>  arch/arm/common/dmabounce.c  |  582 -
>  arch/arm/include/asm/dma-mapping.h   |  128 ---
>  b/arch/arm/Kconfig   |5 
>  b/arch/arm/common/Kconfig|6 
>  b/arch/arm/common/Makefile   |1 
>  b/arch/arm/common/sa1111.c   |   64 -
>  b/arch/arm/include/asm/device.h  |3 
>  b/arch/arm/include/asm/dma-direct.h  |   49 -
>  b/arch/arm/include/asm/memory.h  |2 
>  b/arch/arm/mach-footbridge/Kconfig   |1 
>  b/arch/arm/mach-footbridge/common.c  |   19 
>  b/arch/arm/mach-footbridge/include/mach/dma-direct.h |8 
>  b/arch/arm/mach-footbridge/include/mach/memory.h |4 
>  b/arch/arm/mach-highbank/highbank.c  |2 
>  b/arch/arm/mach-mvebu/coherency.c|2 
>  b/arch/arm/mm/dma-mapping.c  |  649 ++-
>  b/drivers/usb/core/hcd.c |   17 
>  b/drivers/usb/host/ohci-sa1111.c |   25 
>  18 files changed, 137 insertions(+), 1430 deletions(-)
> 
> ___
> linux-arm-kernel mailing list
> linux-arm-ker...@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
---end quoted text---


Re: [PATCH v2 3/3] arch/*/: remove CONFIG_VIRT_TO_BUS

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 29, 2022 at 09:38:00AM +1200, Michael Schmitz wrote:
> That's one of the 'liberties' I alluded to. The reason I left these in is
> that I'm none too certain what device feature the DMA API uses to decide a
> device isn't cache-coherent.

The DMA API does not look at device features at all.  It needs to be
told so by the platform code.  Once an architecture implements the
hooks to support non-coherent DMA all devices are treated as
non-coherent by default unless overriden by the architecture either
globally (using the global dma_default_coherent variable) or per-device
(using the dev->dma_coherent field, usually set by arch_setup_dma_ops).
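As a sketch (simplified, the exact signature varies by kernel version):

	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				const struct iommu_ops *iommu, bool coherent)
	{
		/* per-device override; the global default comes from
		 * dma_default_coherent */
		dev->dma_coherent = coherent;
	}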

> If it's dev->coherent_dma_mask, the way I set
> up the device in the a3000 driver should leave the coherent mask unchanged.
> For the Zorro drivers, devices are set up to use the same storage to store
> normal and coherent masks - something we most likely want to change. I need
> to think about the ramifications of that.

No, the coherent mask is slightly misnamed and not actually related.


Re: [PATCH v2 3/3] arch/*/: remove CONFIG_VIRT_TO_BUS

2022-06-28 Thread Christoph Hellwig
On Wed, Jun 29, 2022 at 11:09:00AM +1200, Michael Schmitz wrote:
> And all SCSI buffers are allocated using kmalloc? No way at all for user
> space to pass unaligned data?

Most that you will see actually comes from the page allocator.  But
the block layer has a dma_alignment limit, and when userspace sends
I/O that is not properly aligned it will be bounce buffered before
it is sent to the driver.
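The driver side of that is just (sketch):

	/* declare a 512-byte alignment requirement; the block layer
	 * bounce-buffers any user I/O that violates it */
	blk_queue_dma_alignment(q, 511);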


Re: [PATCH v4 5/5] libata-scsi: Cap ata_device->max_sectors according to shost->max_sectors

2022-06-28 Thread Christoph Hellwig
On Tue, Jun 28, 2022 at 12:33:58PM +0100, John Garry wrote:
> Well Christoph originally offered to take this series via the dma-mapping 
> tree.
>
> @Christoph, is that still ok with you? If so, would you rather I send this 
> libata patch separately?

The offer still stands, and I don't really care where the libata
patch is routed.  Just tell me what you prefer.


[GIT PULL] dma-mapping fix for Linux 5.19

2022-06-25 Thread Christoph Hellwig
The following changes since commit a111daf0c53ae91e71fd2bfe7497862d14132e3e:

  Linux 5.19-rc3 (2022-06-19 15:06:47 -0500)

are available in the Git repository at:

  git://git.infradead.org/users/hch/dma-mapping.git tags/dma-mapping-5.19-2022-06-26

for you to fetch changes up to 3be4562584bba603f33863a00c1c32eecf772ee6:

  dma-direct: use the correct size for dma_set_encrypted() (2022-06-23 15:26:59 +0200)


dma-mapping fixes for Linux 5.19

 - pass the correct size to dma_set_encrypted() when freeing memory
   (Dexuan Cui)


Dexuan Cui (1):
  dma-direct: use the correct size for dma_set_encrypted()

 kernel/dma/direct.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)


Re: [PATCH v2] MAINTAINERS: Add new IOMMU development mailing list

2022-06-24 Thread Christoph Hellwig
On Fri, Jun 24, 2022 at 02:51:39PM +0200, Joerg Roedel wrote:
> From: Joerg Roedel 
> 
> The IOMMU mailing list will move from lists.linux-foundation.org to
> lists.linux.dev. The hard switch of the archive will happen on July
> 5th, but add the new list now already so that people start using the
> list when sending patches. After July 5th the old list will disappear.

Shouldn't this also remove the old list given it has only a few days
to live?


Re: [PATCH] MAINTAINERS: Add new IOMMU development mailing list

2022-06-23 Thread Christoph Hellwig
On Wed, Jun 22, 2022 at 10:26:01AM +0200, Joerg Roedel wrote:
> From: Joerg Roedel 
> 
> The IOMMU mailing list will move from lists.linux-foundation.org to
> lists.linux.dev. The hard switch of the archive will happen on July
> 5th, but add the new list now already so that people start using the
> list when sending patches.

iommu@lists.linux-foundation.org is also listed for various other
MAINTAINERS entries.  Can you please send a list to update all of them
to Linus ASAP, including for 5.19 and -stable?



Re: [PATCH] dma-direct: use the correct size for dma_set_encrypted()

2022-06-23 Thread Christoph Hellwig
On Thu, Jun 23, 2022 at 07:00:58AM +, Dexuan Cui wrote:
> It looks like commit 4a37f3dd9a831 fixed a different issue?
> 
> Here my patch is for the latest mainline:
> 
> In dma_direct_alloc()'s error handling path, we pass 'size' to 
> dma_set_encrypted():
>   out_encrypt_pages:
> if (dma_set_encrypted(dev, page_address(page), size))
> 
> However, in dma_direct_free(), we pass ' 1 << page_order ' to 
> dma_set_encrypted().
> I think the ' 1 << page_order' is incorrect and it should be 'size' as well?

Indeed.  I've applied the patch now.


Re: [PATCH] dma-direct: use the correct size for dma_set_encrypted()

2022-06-22 Thread Christoph Hellwig
On Wed, Jun 22, 2022 at 12:14:24PM -0700, Dexuan Cui wrote:
> The third parameter of dma_set_encrypted() is a size in bytes rather than
> the number of pages.
> 
> Fixes: 4d0564785bb0 ("dma-direct: factor out dma_set_{de,en}crypted helpers")
> Signed-off-by: Dexuan Cui 

see:

commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 (tag: dma-mapping-5.19-2022-05-25)
Author: Robin Murphy 
Date:   Fri May 20 18:10:13 2022 +0100

dma-direct: don't over-decrypt memory


Re: [PATCH 0/2] dma-mapping, remoteproc: Fix dma_mem leak after rproc_shutdown

2022-06-22 Thread Christoph Hellwig
On Wed, Jun 22, 2022 at 10:25:40AM -0600, Mathieu Poirier wrote:
> On Sat, Apr 23, 2022 at 07:46:50PM +0200, Christoph Hellwig wrote:
> > Sigh.  In theory drivers should never declare coherent memory like
> > this, and there has been some work to fix remoteproc in that regard.
> > 
> > But I guess until that is merged we'll need somthing like this fix.
> 
> Should I take this in the remoteproc tree?  If so, can I get an RB?

Reluctantly-Acked-by: Christoph Hellwig 


Re: [PATCH] swiotlb: Remove redundant swiotlb_force

2022-06-22 Thread Christoph Hellwig
On Wed, Jun 22, 2022 at 03:29:52PM +0100, Steven Price wrote:
> The variable (and enum) was removed in commit c6af2aa9ffc9 ("swiotlb:
> make the swiotlb_init interface more useful") but the declaration was
> left in swiotlb.h. Tidy up by removing the declaration as well.
> 
> Signed-off-by: Steven Price 

I just applied an identical patch from Dongli Zhang a few hours ago.


Re: [PATCH] dma-direct: take dma-ranges/offsets into account in resource mapping

2022-06-22 Thread Christoph Hellwig
I'd really like to hear something from the driver maintainers.  The
code change itself looks fine, we just need to make sure it does not
break any driver assumptions.

And I think at least for the PCIe P2P and NTB cases I fear it might
break them.  The proper logic for those is in the p2p helpers, but
it seems like not everyone is using them.


Re: [RFC PATCH V4 1/1] swiotlb: Split up single swiotlb lock

2022-06-22 Thread Christoph Hellwig
Thanks,

this looks pretty good to me.  A few comments below:

On Fri, Jun 17, 2022 at 10:47:41AM -0400, Tianyu Lan wrote:
> +/**
> + * struct io_tlb_area - IO TLB memory area descriptor
> + *
> + * This is a single area with a single lock.
> + *
> + * @used:    The number of used IO TLB blocks.
> + * @index:   The slot index to start searching in this area for next round.
> + * @lock:    The lock to protect the above data structures in the map and
> + *   unmap calls.
> + */
> +struct io_tlb_area {
> + unsigned long used;
> + unsigned int index;
> + spinlock_t lock;
> +};

This can go into swiotlb.c.

> +void __init swiotlb_adjust_nareas(unsigned int nareas);

And this should be marked static.

> +#define DEFAULT_NUM_AREAS 1

I'd drop this define, the magic 1 and a > 1 comparison seems to
convey how it is used much better as the checks aren't about default
or not, but about larger than one.

I also think that we want some good way to size the default, e.g.
by number of CPUs or memory size.
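E.g. something like (untested sketch):

	/* scale with the CPU count, but keep a sane number of slots
	 * per area so small pools aren't fragmented to death */
	nareas = min_t(unsigned int, roundup_pow_of_two(num_possible_cpus()),
		       default_nslabs / IO_TLB_SEGSIZE);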

> +void __init swiotlb_adjust_nareas(unsigned int nareas)
> +{
> + if (!is_power_of_2(nareas)) {
> + pr_err("swiotlb: Invalid areas parameter %d.\n", nareas);
> + return;
> + }
> +
> + default_nareas = nareas;
> +
> + pr_info("area num %d.\n", nareas);
> + /* Round up number of slabs to the next power of 2.
> +  * The last area is going to be smaller than the rest if
> +  * default_nslabs is not power of two.
> +  */

Please follow the normal kernel comment style with a /* on its own line.



Re: [PATCH v1 0/4] swiotlb: some cleanup

2022-06-22 Thread Christoph Hellwig
Thanks,

I've applied all 4 to the dma-mapping tree for Linux 5.20.


Re: helping with remapping vmem for dma

2022-06-15 Thread Christoph Hellwig
On Wed, Jun 15, 2022 at 02:15:33PM +0100, Robin Murphy wrote:
> Put simply, if you want to call dma_map_single() on a buffer, then that 
> buffer needs to be allocated with kmalloc() (or technically alloc_pages(), 
> but then dma_map_page() would make more sense when dealing with entire 
> pages).

Yes.  It sounds like the memory here comes from the dma coherent
allocator, in which case the code need to use the address returned
by that and not create another mapping.
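I.e. (sketch):

	dma_addr_t dma_handle;
	void *cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);

	/* program dma_handle into the device and access the buffer
	 * through cpu_addr; do not call dma_map_single() on it */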


[PATCH 10/10] ARM/dma-mapping: merge IOMMU ops

2022-06-14 Thread Christoph Hellwig
From: Robin Murphy 

The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. Some minor tweaks to make those
safe for coherent devices with minimal overhead, and we can condense
down to a single set of DMA ops.

Signed-off-by: Robin Murphy 
Signed-off-by: Christoph Hellwig 
Tested-by: Marc Zyngier 
---
 arch/arm/mm/dma-mapping.c | 37 +
 1 file changed, 13 insertions(+), 24 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7ccf7c82e025..e68d1d2ac4be0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *s;
int i;
 
+   if (dev->dma_coherent)
+   return;
+
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *s;
int i;
 
+   if (dev->dma_coherent)
+   return;
+
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
-   struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+   struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
 
-   if (!iova)
+   if (dev->dma_coherent || !iova)
return;
 
+   page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
-   struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+   struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
 
-   if (!iova)
+   if (dev->dma_coherent || !iova)
return;
 
+   page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
.unmap_resource = arm_iommu_unmap_resource,
 };
 
-static const struct dma_map_ops iommu_coherent_ops = {
-   .alloc  = arm_iommu_alloc_attrs,
-   .free   = arm_iommu_free_attrs,
-   .mmap   = arm_iommu_mmap_attrs,
-   .get_sgtable= arm_iommu_get_sgtable,
-
-   .map_page   = arm_iommu_map_page,
-   .unmap_page = arm_iommu_unmap_page,
-
-   .map_sg = arm_iommu_map_sg,
-   .unmap_sg   = arm_iommu_unmap_sg,
-
-   .map_resource   = arm_iommu_map_resource,
-   .unmap_resource = arm_iommu_unmap_resource,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, 
u64 dma_base, u64 size,
return;
}
 
-   if (coherent)
-   set_dma_ops(dev, &iommu_coherent_ops);
-   else
-   set_dma_ops(dev, &iommu_ops);
+   set_dma_ops(dev, &iommu_ops);
 }
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
-- 
2.30.2



[PATCH 09/10] ARM/dma-mapping: consolidate IOMMU ops callbacks

2022-06-14 Thread Christoph Hellwig
From: Robin Murphy 

Merge the coherent and non-coherent callbacks down to a single
implementation each, relying on the generic dev->dma_coherent
flag at the points where the difference matters.

Signed-off-by: Robin Murphy 
Signed-off-by: Christoph Hellwig 
Tested-by: Marc Zyngier 
---
 arch/arm/mm/dma-mapping.c | 238 +-
 1 file changed, 55 insertions(+), 183 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4055f2dc2859e..e7ccf7c82e025 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1079,13 +1079,13 @@ static void __iommu_free_atomic(struct device *dev, 
void *cpu_addr,
__free_from_pool(cpu_addr, size);
 }
 
-static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
-   dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
-   int coherent_flag)
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+   dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
void *addr = NULL;
+   int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
 
*handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
@@ -1128,19 +1128,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, 
size_t size,
return NULL;
 }
 
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-   dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-   return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
-}
-
-static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
-   dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-   return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
-}
-
-static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct 
*vma,
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
 {
@@ -1154,35 +1142,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, 
struct vm_area_struct *vma
if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
 
+   if (!dev->dma_coherent)
+   vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
err = vm_map_pages(vma, pages, nr_pages);
if (err)
pr_err("Remapping memory failed: %d\n", err);
 
return err;
 }
-static int arm_iommu_mmap_attrs(struct device *dev,
-   struct vm_area_struct *vma, void *cpu_addr,
-   dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-   vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
-   return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 
attrs);
-}
-
-static int arm_coherent_iommu_mmap_attrs(struct device *dev,
-   struct vm_area_struct *vma, void *cpu_addr,
-   dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-   return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 
attrs);
-}
 
 /*
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-static void __arm_iommu_free_attrs(struct device *dev, size_t size, void 
*cpu_addr,
-   dma_addr_t handle, unsigned long attrs, int coherent_flag)
+static void arm_iommu_free_attrs(struct device *dev, size_t size, void 
*cpu_addr,
+   dma_addr_t handle, unsigned long attrs)
 {
+   int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
struct page **pages;
size = PAGE_ALIGN(size);
 
@@ -1204,19 +1181,6 @@ static void __arm_iommu_free_attrs(struct device *dev, 
size_t size, void *cpu_ad
__iommu_free_buffer(dev, pages, size, attrs);
 }
 
-static void arm_iommu_free_attrs(struct device *dev, size_t size,
-void *cpu_addr, dma_addr_t handle,
-unsigned long attrs)
-{
-   __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
-}
-
-static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
-   void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
-   __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
-}
-
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 void *cpu_addr, dma_addr_t dma_addr,
 size_t size, unsigned long attrs)
@@ -1236,8 +1200,7 @@ static int arm_iommu_get_sgtable(struct device *dev, 
struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
  size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, unsigned long attrs,
- 

[PATCH 08/10] ARM/dma-mapping: drop .dma_supported for IOMMU ops

2022-06-14 Thread Christoph Hellwig
From: Robin Murphy 

When an IOMMU is present, we trust that it should be capable
of remapping any physical memory, and since the device masks
represent the input (virtual) addresses to the IOMMU it makes
no sense to validate them against physical PFNs anyway.

Signed-off-by: Robin Murphy 
Signed-off-by: Christoph Hellwig 
Tested-by: Marc Zyngier 
---
 arch/arm/mm/dma-mapping.c | 23 ---
 1 file changed, 23 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ceb56928d01ec..4055f2dc2859e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -104,25 +104,6 @@ static struct arm_dma_buffer *arm_dma_buffer_find(void 
*virt)
  *
  */
 
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ff as the mask
- * to this function.
- */
-static int arm_dma_supported(struct device *dev, u64 mask)
-{
-   unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
-   /*
-* Translate the device's DMA mask to a PFN limit.  This
-* PFN number includes the page which we can DMA to.
-*/
-   return PHYS_PFN(dma_to_phys(dev, mask)) >= max_dma_pfn;
-}
-#endif
-
 static void __dma_clear_buffer(struct page *page, size_t size, int 
coherent_flag)
 {
/*
@@ -1681,8 +1662,6 @@ static const struct dma_map_ops iommu_ops = {
 
.map_resource   = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
-
-   .dma_supported  = arm_dma_supported,
 };
 
 static const struct dma_map_ops iommu_coherent_ops = {
@@ -1699,8 +1678,6 @@ static const struct dma_map_ops iommu_coherent_ops = {
 
.map_resource   = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
-
-   .dma_supported  = arm_dma_supported,
 };
 
 /**
-- 
2.30.2



[PATCH 07/10] ARM/dma-mapping: use dma-direct unconditionally

2022-06-14 Thread Christoph Hellwig
Use dma-direct unconditionally on arm.  It has already been used for
some time for LPAE and nommu configurations.

This mostly changes the streaming mapping implementation and the (simple)
coherent allocator for devices that are DMA coherent.  The existing
complex allocator for uncached mappings for non-coherent devices is still
used as is using the arch_dma_alloc/arch_dma_free hooks.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
Acked-by: Andre Przywara  [highbank]
Tested-by: Marc Zyngier 
---
 arch/arm/Kconfig   |   4 +-
 arch/arm/include/asm/dma-mapping.h |  24 --
 arch/arm/mach-highbank/highbank.c  |   2 +-
 arch/arm/mach-mvebu/coherency.c|   2 +-
 arch/arm/mm/dma-mapping.c  | 365 ++---
 5 files changed, 19 insertions(+), 378 deletions(-)
 delete mode 100644 arch/arm/include/asm/dma-mapping.h

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cd67e359958cb..4c18fe7b5d1cf 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,8 +19,8 @@ config ARM
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
-   select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
-   select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
+   select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+   select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index 6427b934bd11c..0
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include 
-#include 
-
-#include 
-#include 
-
-extern const struct dma_map_ops arm_dma_ops;
-extern const struct dma_map_ops arm_coherent_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-   if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
-   return &arm_dma_ops;
-   return NULL;
-}
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/mach-highbank/highbank.c 
b/arch/arm/mach-highbank/highbank.c
index db607955a7e45..5d4f977ac7d2a 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -98,7 +98,7 @@ static int highbank_platform_notifier(struct notifier_block 
*nb,
if (of_property_read_bool(dev->of_node, "dma-coherent")) {
val = readl(sregs_base + reg);
writel(val | 0xff01, sregs_base + reg);
-   set_dma_ops(dev, &arm_coherent_dma_ops);
+   dev->dma_coherent = true;
}
 
return NOTIFY_OK;
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 49e3c8d20c2fa..865ac4bc060df 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -98,7 +98,7 @@ static int mvebu_hwcc_notifier(struct notifier_block *nb,
 
if (event != BUS_NOTIFY_ADD_DEVICE)
return NOTIFY_DONE;
-   set_dma_ops(dev, &arm_coherent_dma_ops);
+   dev->dma_coherent = true;
 
return NOTIFY_OK;
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a09ce16c7ddbd..ceb56928d01ec 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -103,79 +103,8 @@ static struct arm_dma_buffer *arm_dma_buffer_find(void 
*virt)
  * before transfers and delay cache invalidation until transfer completion.
  *
  */
-static void __dma_page_cpu_to_dev(struct page *, unsigned long,
-   size_t, enum dma_data_direction);
-static void __dma_page_dev_to_cpu(struct page *, unsigned long,
-   size_t, enum dma_data_direction);
-
-/**
- * arm_dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
-unsigned long offset, size_t size, enum dma_data_direction dir,
-unsigned long attrs)
-{
-   if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-   __dma_page_cpu_to_dev(page, offset, size, dir);
-   return phys_to_dma(dev, page_to_phys(page) + offset);
-}
-
-static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page 
*page,
-unsigne

[PATCH 05/10] ARM/dma-mapping: use dma_to_phys/phys_to_dma in the dma-mapping code

2022-06-14 Thread Christoph Hellwig
Use the helpers as expected by the dma-direct code in the old arm
dma-mapping code to ease a gradual switch to the common DMA code.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
Tested-by: Marc Zyngier 
---
 arch/arm/mm/dma-mapping.c | 24 
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 90142183d1045..a09ce16c7ddbd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -128,14 +128,14 @@ static dma_addr_t arm_dma_map_page(struct device *dev, 
struct page *page,
 {
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
-   return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+   return phys_to_dma(dev, page_to_phys(page) + offset);
 }
 
 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page 
*page,
 unsigned long offset, size_t size, enum dma_data_direction dir,
 unsigned long attrs)
 {
-   return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+   return phys_to_dma(dev, page_to_phys(page) + offset);
 }
 
 /**
@@ -156,7 +156,7 @@ static void arm_dma_unmap_page(struct device *dev, 
dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-   __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+   __dma_page_dev_to_cpu(phys_to_page(dma_to_phys(dev, handle)),
  handle & ~PAGE_MASK, size, dir);
 }
 
@@ -164,7 +164,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
unsigned int offset = handle & (PAGE_SIZE - 1);
-   struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+   struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -172,7 +172,7 @@ static void arm_dma_sync_single_for_device(struct device 
*dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
unsigned int offset = handle & (PAGE_SIZE - 1);
-   struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+   struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -190,7 +190,7 @@ static int arm_dma_supported(struct device *dev, u64 mask)
 * Translate the device's DMA mask to a PFN limit.  This
 * PFN number includes the page which we can DMA to.
 */
-   return dma_to_pfn(dev, mask) >= max_dma_pfn;
+   return PHYS_PFN(dma_to_phys(dev, mask)) >= max_dma_pfn;
 }
 
 static void __dma_clear_buffer(struct page *page, size_t size, int 
coherent_flag)
@@ -681,7 +681,7 @@ static void *__dma_alloc(struct device *dev, size_t size, 
dma_addr_t *handle,
if (page) {
unsigned long flags;
 
-   *handle = pfn_to_dma(dev, page_to_pfn(page));
+   *handle = phys_to_dma(dev, page_to_phys(page));
buf->virt = args.want_vaddr ? addr : page;
 
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
@@ -721,7 +721,7 @@ static int __arm_dma_mmap(struct device *dev, struct 
vm_area_struct *vma,
int ret = -ENXIO;
unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   unsigned long pfn = dma_to_pfn(dev, dma_addr);
+   unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
unsigned long off = vma->vm_pgoff;
 
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
@@ -762,7 +762,7 @@ static void __arm_dma_free(struct device *dev, size_t size, 
void *cpu_addr,
   dma_addr_t handle, unsigned long attrs,
   bool is_coherent)
 {
-   struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+   struct page *page = phys_to_page(dma_to_phys(dev, handle));
struct arm_dma_buffer *buf;
struct arm_dma_free_args args = {
.dev = dev,
@@ -796,15 +796,15 @@ static int arm_dma_get_sgtable(struct device *dev, struct 
sg_table *sgt,
 void *cpu_addr, dma_addr_t handle, size_t size,
 unsigned long attrs)
 {
-   unsigned long pfn = dma_to_pfn(dev, handle);
+   phys_addr_t paddr = dma_to_phys(dev, handle);
struct page *page;
int ret;
 
/* If the PFN is not valid, we do not have a struct page */
-   if (!pfn_valid(pfn))
+   if (!pfn_valid(PHYS_PFN(paddr)))
return -ENXIO;
 
-   page = pfn_to_page(pfn);
+   page = phys_to_page(paddr);
 
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
-- 
2.30.2


[PATCH 06/10] ARM/dma-mapping: use the generic versions of dma_to_phys/phys_to_dma by default

2022-06-14 Thread Christoph Hellwig
Only the footbridge platforms provide their own DMA address translation
helpers, so switch to the generic version for all other platforms, and
consolidate the footbridge implementation to remove two levels of
indirection.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
Tested-by: Marc Zyngier 
---
 arch/arm/Kconfig  |  1 -
 arch/arm/include/asm/dma-direct.h | 41 +--
 arch/arm/include/asm/memory.h |  2 -
 arch/arm/mach-footbridge/Kconfig  |  1 +
 arch/arm/mach-footbridge/common.c | 19 +
 .../mach-footbridge/include/mach/dma-direct.h |  8 
 .../arm/mach-footbridge/include/mach/memory.h |  4 --
 7 files changed, 21 insertions(+), 55 deletions(-)
 create mode 100644 arch/arm/mach-footbridge/include/mach/dma-direct.h

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7630ba9cb6ccc..cd67e359958cb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,7 +15,6 @@ config ARM
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
-   select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
diff --git a/arch/arm/include/asm/dma-direct.h 
b/arch/arm/include/asm/dma-direct.h
index 6fd52713b5d12..4f7bcde03abb5 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -1,40 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_DMA_DIRECT_H
-#define ASM_ARM_DMA_DIRECT_H 1
-
-#include 
-
-/*
- * dma_to_pfn/pfn_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-   if (dev && dev->dma_range_map)
-   pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));
-   return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
-   unsigned long pfn = __bus_to_pfn(addr);
-
-   if (dev && dev->dma_range_map)
-   pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));
-   return pfn;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-   unsigned int offset = paddr & ~PAGE_MASK;
-   return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-   unsigned int offset = dev_addr & ~PAGE_MASK;
-   return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-#endif /* ASM_ARM_DMA_DIRECT_H */
+#include 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index f673e13e0f942..a55a9038abc89 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -378,8 +378,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus  __virt_to_phys
 #define __bus_to_virt  __phys_to_virt
-#define __pfn_to_bus(x)__pfn_to_phys(x)
-#define __bus_to_pfn(x)__phys_to_pfn(x)
 #endif
 
 /*
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index 728aff93fba9d..b5bbdcf2e4896 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -60,6 +60,7 @@ endmenu
 
 # Footbridge support
 config FOOTBRIDGE
+   select ARCH_HAS_PHYS_TO_DMA
bool
 
 # Footbridge in host mode
diff --git a/arch/arm/mach-footbridge/common.c 
b/arch/arm/mach-footbridge/common.c
index 322495df271d5..5020eb96b025d 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include 
@@ -335,17 +336,19 @@ unsigned long __bus_to_virt(unsigned long res)
return res;
 }
 EXPORT_SYMBOL(__bus_to_virt);
-
-unsigned long __pfn_to_bus(unsigned long pfn)
+#else
+static inline unsigned long fb_bus_sdram_offset(void)
 {
-   return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
+   return BUS_OFFSET;
 }
-EXPORT_SYMBOL(__pfn_to_bus);
+#endif /* CONFIG_FOOTBRIDGE_ADDIN */
 
-unsigned long __bus_to_pfn(unsigned long bus)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-   return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
+   return paddr + (fb_bus_sdram_offset() - PHYS_OFFSET);
 }
-EXPORT_SYMBOL(__bus_to_pfn);
 
-#endif
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+   return dev_addr - (fb_bus_sdram_offset() - PHYS_OFFSET);
+}
diff --git a/arch/arm/mach-footbridge/include/mach/dma-direct.h 
b/arch/arm/mach-footbridge/include/mach/dma-direct.h
new file mode 100644
index 0..01f9e8367c009
--- /d

[PATCH 04/10] ARM/dma-mapping: remove the unused virt_to_dma helper

2022-06-14 Thread Christoph Hellwig
virt_to_dma was only used by the now removed dmabounce code.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
Tested-by: Marc Zyngier 
---
 arch/arm/include/asm/dma-direct.h | 10 +-
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/arch/arm/include/asm/dma-direct.h 
b/arch/arm/include/asm/dma-direct.h
index 77fcb7ee5ec90..6fd52713b5d12 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -5,7 +5,7 @@
 #include 
 
 /*
- * dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private
+ * dma_to_pfn/pfn_to_dma are architecture private
  * functions used internally by the DMA-mapping API to provide DMA
  * addresses. They must not be used by drivers.
  */
@@ -25,14 +25,6 @@ static inline unsigned long dma_to_pfn(struct device *dev, 
dma_addr_t addr)
return pfn;
 }
 
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
-   if (dev)
-   return pfn_to_dma(dev, virt_to_pfn(addr));
-
-   return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
unsigned int offset = paddr & ~PAGE_MASK;
-- 
2.30.2



[PATCH 03/10] ARM/dma-mapping: mark various dma-mapping routines static in dma-mapping.c

2022-06-14 Thread Christoph Hellwig
With the dmabounce removal these aren't used outside of dma-mapping.c,
so mark them static.  Move the dma_map_ops declarations down a bit
to avoid lots of forward declarations.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
Tested-by: Marc Zyngier 
---
 arch/arm/include/asm/dma-mapping.h |  75 --
 arch/arm/mm/dma-mapping.c  | 100 +
 2 files changed, 46 insertions(+), 129 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
index 1e015a7ad86aa..6427b934bd11c 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -20,80 +20,5 @@ static inline const struct dma_map_ops 
*get_arch_dma_ops(struct bus_type *bus)
return NULL;
 }
 
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
- *
- * Allocate some memory for a device for performing DMA.  This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-  gfp_t gfp, unsigned long attrs);
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-dma_addr_t handle, unsigned long attrs);
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space.  The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-   void *cpu_addr, dma_addr_t dma_addr, size_t size,
-   unsigned long attrs);
-
-/*
- * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
-   enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
-   enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-   enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, 
int,
-   enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-   void *cpu_addr, dma_addr_t dma_addr, size_t size,
-   unsigned long attrs);
-
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 059cce0185706..90142183d1045 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -193,50 +193,6 @@ static int arm_dma_supported(struct device *dev, u64 mask)
return dma_to_pfn(dev, mask) >= max_dma_pfn;
 }
 
-const struct dma_map_ops arm_dma_ops = {
-   .alloc  = arm_dma_alloc,
-   .free   = arm_dma_free,
-   .alloc_pages= dma_direct_alloc_pages,
-   .free_pages = dma_direct_free_pages,
-   .mmap   = arm_dma_mmap,
-   .get_sgtable= a

[PATCH 02/10] ARM/dma-mapping: remove dmabounce

2022-06-14 Thread Christoph Hellwig
Remove the now unused dmabounce code.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Arnd Bergmann 
---
 arch/arm/common/Kconfig|   4 -
 arch/arm/common/Makefile   |   1 -
 arch/arm/common/dmabounce.c| 582 -
 arch/arm/include/asm/device.h  |   3 -
 arch/arm/include/asm/dma-mapping.h |  29 --
 5 files changed, 619 deletions(-)
 delete mode 100644 arch/arm/common/dmabounce.c

diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index bc158fd227e12..d2fdb1796f488 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -3,10 +3,6 @@ config SA1111
bool
select ZONE_DMA if ARCH_SA1100
 
-config DMABOUNCE
-   bool
-   select ZONE_DMA
-
 config KRAIT_L2_ACCESSORS
bool
 
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 8cd574be94cfe..7bae8cbaafe78 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -6,7 +6,6 @@
 obj-y  += firmware.o
 
obj-$(CONFIG_SA1111)   += sa1111.o
-obj-$(CONFIG_DMABOUNCE)+= dmabounce.o
 obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
 obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
 obj-$(CONFIG_SHARP_PARAM)  += sharpsl_param.o
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
deleted file mode 100644
index 7996c04393d50..0
--- a/arch/arm/common/dmabounce.c
+++ /dev/null
@@ -1,582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  arch/arm/common/dmabounce.c
- *
- *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
- *  limited DMA windows. These functions utilize bounce buffers to
- *  copy data to/from buffers located outside the DMA region. This
- *  only works for systems in which DMA memory is at the bottom of
- *  RAM, the remainder of memory is at the top and the DMA memory
- *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
- *  DMA windows will require custom implementations that reserve memory
- *  areas at early bootup.
- *
- *  Original version by Brad Parker (b...@heeltoe.com)
- *  Re-written by Christopher Hoover 
- *  Made generic by Deepak Saxena 
- *
- *  Copyright (C) 2002 Hewlett Packard Company.
- *  Copyright (C) 2004 MontaVista Software, Inc.
- */
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-
-#undef STATS
-
-#ifdef STATS
-#define DO_STATS(X) do { X ; } while (0)
-#else
-#define DO_STATS(X) do { } while (0)
-#endif
-
-/* ** */
-
-struct safe_buffer {
-   struct list_head node;
-
-   /* original request */
-   void*ptr;
-   size_t  size;
-   int direction;
-
-   /* safe buffer info */
-   struct dmabounce_pool *pool;
-   void*safe;
-   dma_addr_t  safe_dma_addr;
-};
-
-struct dmabounce_pool {
-   unsigned long   size;
-   struct dma_pool *pool;
-#ifdef STATS
-   unsigned long   allocs;
-#endif
-};
-
-struct dmabounce_device_info {
-   struct device *dev;
-   struct list_head safe_buffers;
-#ifdef STATS
-   unsigned long total_allocs;
-   unsigned long map_op_count;
-   unsigned long bounce_count;
-   int attr_res;
-#endif
-   struct dmabounce_pool   small;
-   struct dmabounce_pool   large;
-
-   rwlock_t lock;
-
-   int (*needs_bounce)(struct device *, dma_addr_t, size_t);
-};
-
-#ifdef STATS
-static ssize_t dmabounce_show(struct device *dev, struct device_attribute 
*attr,
- char *buf)
-{
-   struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-   return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
-   device_info->small.allocs,
-   device_info->large.allocs,
-   device_info->total_allocs - device_info->small.allocs -
-   device_info->large.allocs,
-   device_info->total_allocs,
-   device_info->map_op_count,
-   device_info->bounce_count);
-}
-
-static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
-#endif
-
-
-/* allocate a 'safe' buffer and keep track of it */
-static inline struct safe_buffer *
-alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
- size_t size, enum dma_data_direction dir)
-{
-   struct safe_buffer *buf;
-   struct dmabounce_pool *pool;
-   struct device *dev = device_info->dev;
-   unsigned long flags;
-
-   dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
-   __func__, ptr, size, dir);
-
-   if (size <= device_info->small.size) {
-   pool = &device_info->small;
-   } else if (size <= device_info->large.size) {
-   pool = &device_info->large;
-  

[PATCH 01/10] ARM: sa1100/assabet: move dmabounce hack to ohci driver

2022-06-14 Thread Christoph Hellwig
From: Arnd Bergmann 

The sa1111 platform is one of the two remaining users of the old Arm
specific "dmabounce" code, which is an earlier implementation of the
generic swiotlb.

Linus Walleij submitted a patch that removes dmabounce support from
the ixp4xx, and I had a look at the other user, which is the sa1111
companion chip.

Looking at how dmabounce is used, I could narrow it down to one driver
on three machines:

 - dmabounce is only initialized on assabet/neponset, jornada720 and
   badge4, which are the platforms that have an sa1111 and support
   DMA on it.

 - All three of these suffer from "erratum #7" that requires only
   doing DMA to half the memory sections based on one of the address
   lines, in addition, the neponset also can't DMA to the RAM that
   is connected to sa1111 itself.

 - the pxa lubbock machine also has sa1111, but does not support DMA
   on it and does not set dmabounce.

 - only the OHCI and audio devices on sa1111 support DMA, but as
   there is no audio driver for this hardware, only OHCI remains.

In the OHCI code, I noticed that two other platforms already have
local bounce buffer support in the form of the "local_mem"
allocator. Specifically, TMIO and SM501 use this on a few other ARM
boards with 16KB or 128KB of local SRAM that can be accessed from the
OHCI and from the CPU.

While this is not the same problem as on sa1111, I could not find a
reason why we can't re-use the existing implementation but replace the
physical SRAM address mapping with a locally allocated DMA buffer.

There are two main downsides:

 - rather than using a dynamically sized pool, this buffer needs
   to be allocated at probe time using a fixed size. Without
   having any idea of what it should be, I picked a size of
   64KB, which is between what the other two OHCI front-ends use
   in their SRAM. If anyone has a better idea of what size
   is reasonable, this can be trivially changed.

 - Previously, only USB transfers to unaddressable memory needed
   to go through the bounce buffer, now all of them do, which may
   impact runtime performance for USB endpoints that do a lot of
   transfers.

On the upside, the local_mem support uses write-combining buffers,
which should be a bit faster for transfers to the device compared to
normal uncached coherent memory as used in dmabounce.

Cc: Linus Walleij 
Cc: Russell King 
Cc: Christoph Hellwig 
Cc: Laurentiu Tudor 
Cc: linux-...@vger.kernel.org
Signed-off-by: Arnd Bergmann 
Reviewed-by: Greg Kroah-Hartman 
Acked-by: Alan Stern 
Signed-off-by: Christoph Hellwig 
---
 arch/arm/common/Kconfig|  2 +-
 arch/arm/common/sa1111.c   | 64 --
 drivers/usb/core/hcd.c | 17 +++--
 drivers/usb/host/ohci-sa1111.c | 25 +
 4 files changed, 40 insertions(+), 68 deletions(-)

diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c8e198631d418..bc158fd227e12 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 config SA1111
bool
-   select DMABOUNCE if !ARCH_PXA
+   select ZONE_DMA if ARCH_SA1100
 
 config DMABOUNCE
bool
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 2343e2b6214d7..f5e6990b8856b 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1389,70 +1389,9 @@ void sa1111_driver_unregister(struct sa1111_driver 
*driver)
 }
 EXPORT_SYMBOL(sa1111_driver_unregister);
 
-#ifdef CONFIG_DMABOUNCE
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t 
size)
-{
-   /*
-* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-* User's Guide" mentions that jumpers R51 and R52 control the
-* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-* SDRAM bank 1 on Neponset). The default configuration selects
-* Assabet, so any address in bank 1 is necessarily invalid.
-*/
-   return (machine_is_assabet() || machine_is_pfs168()) &&
-   (addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
-}
-
-static int sa1111_notifier_call(struct notifier_block *n, unsigned long action

fully convert arm to use dma-direct v3

2022-06-14 Thread Christoph Hellwig
Hi all,

arm is the last platform not using the dma-direct code for directly
mapped DMA.  With the dmabounce removal from Arnd we can easily switch
arm to always use dma-direct now (it already does for LPAE configs
and nommu).  I'd love to merge this series through the dma-mapping tree
as it gives us the opportunity for additional core dma-mapping
improvements.

Changes since v2:
 - rebased to Linux 5.19-rc2

Changes since v1:
 - remove another unused function
 - improve a few commit logs
 - add three more patches from Robin

Diffstat:
 arch/arm/common/dmabounce.c  |  582 -
 arch/arm/include/asm/dma-mapping.h   |  128 ---
 b/arch/arm/Kconfig   |5 
 b/arch/arm/common/Kconfig|6 
 b/arch/arm/common/Makefile   |1 
 b/arch/arm/common/sa1111.c   |   64 -
 b/arch/arm/include/asm/device.h  |3 
 b/arch/arm/include/asm/dma-direct.h  |   49 -
 b/arch/arm/include/asm/memory.h  |2 
 b/arch/arm/mach-footbridge/Kconfig   |1 
 b/arch/arm/mach-footbridge/common.c  |   19 
 b/arch/arm/mach-footbridge/include/mach/dma-direct.h |8 
 b/arch/arm/mach-footbridge/include/mach/memory.h |4 
 b/arch/arm/mach-highbank/highbank.c  |2 
 b/arch/arm/mach-mvebu/coherency.c|2 
 b/arch/arm/mm/dma-mapping.c  |  649 ++-
 b/drivers/usb/core/hcd.c |   17 
 b/drivers/usb/host/ohci-sa1111.c |   25 
 18 files changed, 137 insertions(+), 1430 deletions(-)


Re: [PATCH RFC v1 4/7] swiotlb: to implement io_tlb_high_mem

2022-06-12 Thread Christoph Hellwig
On Fri, Jun 10, 2022 at 02:56:08PM -0700, Dongli Zhang wrote:
> Since this patch file has 200+ lines, would you please help clarify what does
> 'this' indicate?

This indicates that any choice of a different swiotlb pool needs to
be hidden inside of swiotlb.  The dma mapping API already provides
swiotlb the addressability requirement for the device.  Similarly we
already have a SWIOTLB_ANY flag that switches to a 64-bit buffer
by default, which we can change to, or replace with a flag that
allocates an additional buffer that is not addressing limited.
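
A hedged sketch of what keeping that choice inside swiotlb could look
like; io_tlb_wide_mem and swiotlb_pool_for_dev() are made-up names for
illustration, only io_tlb_default_mem exists today:

static struct io_tlb_mem *swiotlb_pool_for_dev(struct device *dev)
{
	u64 mask = min_not_zero(dma_get_mask(dev), dev->bus_dma_limit);

	/* devices without an addressing limit can use the wide pool */
	if (io_tlb_wide_mem.nslabs && mask == DMA_BIT_MASK(64))
		return &io_tlb_wide_mem;
	return &io_tlb_default_mem;
}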

Re: [PATCH v2] iommu/dma: Add config for PCI SAC address trick

2022-06-09 Thread Christoph Hellwig
On Thu, Jun 09, 2022 at 04:12:10PM +0100, Robin Murphy wrote:
> +   If you have a modern PCI Express based system, this feature mostly 
> just

Overly long line here.

Otherwise looks good:

Reviewed-by: Christoph Hellwig 


Re: [PATCH RFC v1 3/7] swiotlb-xen: support highmem for xen specific code

2022-06-08 Thread Christoph Hellwig
On Wed, Jun 08, 2022 at 05:55:49PM -0700, Dongli Zhang wrote:
> @@ -109,19 +110,25 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs, 
> bool high)
>   int rc;
>   unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
>   unsigned int i, dma_bits = order + PAGE_SHIFT;
> + unsigned int max_dma_bits = MAX_DMA32_BITS;
>   dma_addr_t dma_handle;
>   phys_addr_t p = virt_to_phys(buf);
>  
>   BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
>   BUG_ON(nslabs % IO_TLB_SEGSIZE);
>  
> + if (high) {
> + dma_bits = MAX_DMA64_BITS;
> + max_dma_bits = MAX_DMA64_BITS;
> + }
> +

I think you really want to pass the addressing bits or mask to the
remap callback and not do magic with a 'high' flag here.
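
For illustration, a sketch of that interface change (the parameter name
is made up and the body is abbreviated from the quoted code):

int xen_swiotlb_fixup(void *buf, unsigned long nslabs,
		      unsigned int max_dma_bits)
{
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int dma_bits = min(order + PAGE_SHIFT, max_dma_bits);

	/* ... existing per-segment exchange loop, capped at max_dma_bits ... */
	return 0;
}

The caller would then pass 64 for an unrestricted pool and the existing
MAX_DMA32_BITS default otherwise, instead of a bool.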


Re: [PATCH RFC v1 7/7] swiotlb: fix the slot_addr() overflow

2022-06-08 Thread Christoph Hellwig
On Wed, Jun 08, 2022 at 05:55:53PM -0700, Dongli Zhang wrote:
> +#define slot_addr(start, idx)((start) + \
> + (((unsigned long)idx) << IO_TLB_SHIFT))

Please just convert it to an inline function.
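
Something like this sketch, keeping the semantics of the macro above
(the phys_addr_t types are a guess at what the callers want):

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}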


Re: [PATCH RFC v1 6/7] virtio: use io_tlb_high_mem if it is active

2022-06-08 Thread Christoph Hellwig
On Wed, Jun 08, 2022 at 05:55:52PM -0700, Dongli Zhang wrote:
>  /* Unique numbering for virtio devices. */
> @@ -241,6 +243,12 @@ static int virtio_dev_probe(struct device *_d)
>   u64 device_features;
>   u64 driver_features;
>   u64 driver_features_legacy;
> + struct device *parent = dev->dev.parent;
> + u64 dma_mask = min_not_zero(*parent->dma_mask,
> + parent->bus_dma_limit);
> +
> + if (dma_mask == DMA_BIT_MASK(64))
> + swiotlb_use_high(parent);

The driver already very clearly communicated its addressing
requirements.  The underlying swiotlb code needs to transparently
pick the right pool.



Re: [PATCH RFC v1 5/7] swiotlb: add interface to set dev->dma_io_tlb_mem

2022-06-08 Thread Christoph Hellwig
This should be handled under the hood without the driver even knowing.


Re: [PATCH RFC v1 4/7] swiotlb: to implement io_tlb_high_mem

2022-06-08 Thread Christoph Hellwig
All this really needs to be hidden under the hood.


Re: [PATCH RFC v1 1/7] swiotlb: introduce the highmem swiotlb buffer

2022-06-08 Thread Christoph Hellwig
On Wed, Jun 08, 2022 at 05:55:47PM -0700, Dongli Zhang wrote:
> @@ -109,6 +109,7 @@ struct io_tlb_mem {
>   } *slots;
>  };
>  extern struct io_tlb_mem io_tlb_default_mem;
> +extern struct io_tlb_mem io_tlb_high_mem;

This should not be exposed.

> +extern bool swiotlb_high_active(void);

And this should not even exist.

> +static unsigned long high_nslabs;

And I don't think "high" is a good name here to start with.  That
suggests highmem, which we are not using here.


[GIT PULL] dma-mapping fixes for 5.19

2022-06-05 Thread Christoph Hellwig
[I really wanted to send these before -rc1, but the fact that today is
 a public holiday here really confused me and messed up my schedule]

The following changes since commit 4a37f3dd9a83186cb88d44808ab35b78375082c9:

  dma-direct: don't over-decrypt memory (2022-05-23 15:25:40 +0200)

are available in the Git repository at:

  git://git.infradead.org/users/hch/dma-mapping.git 
tags/dma-mapping-5.19-2022-06-06

for you to fetch changes up to e15db62bc5648ab459a570862f654e787c498faf:

  swiotlb: fix setting ->force_bounce (2022-06-02 07:17:59 +0200)


dma-mapping fixes for Linux 5.19

 - fix a regression in setting swiotlb ->force_bounce (me)
 - make dma-debug less chatty (Rob Clark)

----
Christoph Hellwig (1):
  swiotlb: fix setting ->force_bounce

Rob Clark (1):
  dma-debug: make things less spammy under memory pressure

 kernel/dma/debug.c   |  2 +-
 kernel/dma/swiotlb.c | 14 ++
 2 files changed, 7 insertions(+), 9 deletions(-)


[PATCH] swiotlb: fix setting ->force_bounce

2022-06-01 Thread Christoph Hellwig
The swiotlb_init refactor messed up assigning ->force_bounce by doing
it in different places based on what caused the setting of the flag.

Fix this by passing the SWIOTLB_* flags to swiotlb_init_io_tlb_mem
and just setting it there.

Fixes: c6af2aa9ffc9 ("swiotlb: make the swiotlb_init interface more useful")
Reported-by: Nathan Chancellor 
Signed-off-by: Christoph Hellwig 
Tested-by: Nathan Chancellor 
---
 kernel/dma/swiotlb.c | 14 ++
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index dfa1de89dc944..cb50f8d383606 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-   unsigned long nslabs, bool late_alloc)
+   unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
void *vaddr = phys_to_virt(start);
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, 
phys_addr_t start,
mem->index = 0;
mem->late_alloc = late_alloc;
 
-   if (swiotlb_force_bounce)
-   mem->force_bounce = true;
+   mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
spin_lock_init(&mem->lock);
for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ void __init swiotlb_init_remap(bool addressing_limit, 
unsigned int flags,
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
  __func__, alloc_size, PAGE_SIZE);
 
-   swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-   mem->force_bounce = flags & SWIOTLB_FORCE;
+   swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
if (flags & SWIOTLB_VERBOSE)
swiotlb_print_info();
@@ -348,7 +346,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 
set_memory_decrypted((unsigned long)vstart,
 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-   swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+   swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
swiotlb_print_info();
return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem 
*rmem,
 
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
 rmem->size >> PAGE_SHIFT);
-   swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-   mem->force_bounce = true;
+   swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+   false);
mem->for_alloc = true;
 
rmem->priv = mem;
-- 
2.30.2



Re: [PATCH 09/15] swiotlb: make the swiotlb_init interface more useful

2022-06-01 Thread Christoph Hellwig
On Wed, Jun 01, 2022 at 11:11:57AM -0700, Nathan Chancellor wrote:
> On Wed, Jun 01, 2022 at 07:57:43PM +0200, Christoph Hellwig wrote:
> > On Wed, Jun 01, 2022 at 10:46:54AM -0700, Nathan Chancellor wrote:
> > > On Wed, Jun 01, 2022 at 07:34:41PM +0200, Christoph Hellwig wrote:
> > > > Can you send me the full dmesg and the content of
> > > > /sys/kernel/debug/swiotlb/io_tlb_nslabs for a good and a bad boot?
> > > 
> > > Sure thing, they are attached! If there is anything else I can provide
> > > or test, I am more than happy to do so.
> > 
> > Nothing interesting.  But the performance numbers almost look like
> > swiotlb=force got ignored before (even if I can't explain why).
> 
> I was able to get my performance back with this diff but I don't know if
> this is a hack or a proper fix in the context of the series.

This looks good, but needs a little tweak.  I'd go for this variant of
it:


diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index dfa1de89dc944..cb50f8d383606 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-   unsigned long nslabs, bool late_alloc)
+   unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
void *vaddr = phys_to_virt(start);
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, 
phys_addr_t start,
mem->index = 0;
mem->late_alloc = late_alloc;
 
-   if (swiotlb_force_bounce)
-   mem->force_bounce = true;
+   mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
spin_lock_init(&mem->lock);
for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ void __init swiotlb_init_remap(bool addressing_limit, 
unsigned int flags,
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
  __func__, alloc_size, PAGE_SIZE);
 
-   swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-   mem->force_bounce = flags & SWIOTLB_FORCE;
+   swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
if (flags & SWIOTLB_VERBOSE)
swiotlb_print_info();
@@ -348,7 +346,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 
set_memory_decrypted((unsigned long)vstart,
 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-   swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+   swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
swiotlb_print_info();
return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem 
*rmem,
 
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
 rmem->size >> PAGE_SHIFT);
-   swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-   mem->force_bounce = true;
+   swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+   false);
mem->for_alloc = true;
 
rmem->priv = mem;



Re: [PATCH 09/15] swiotlb: make the swiotlb_init interface more useful

2022-06-01 Thread Christoph Hellwig
On Wed, Jun 01, 2022 at 10:46:54AM -0700, Nathan Chancellor wrote:
> On Wed, Jun 01, 2022 at 07:34:41PM +0200, Christoph Hellwig wrote:
> > Can you send me the full dmesg and the content of
> > /sys/kernel/debug/swiotlb/io_tlb_nslabs for a good and a bad boot?
> 
> Sure thing, they are attached! If there is anything else I can provide
> or test, I am more than happy to do so.

Nothing interesting.  But the performance numbers almost look like
swiotlb=force got ignored before (even if I can't explain why).
Do you get a similar performance with the new kernel without
swiotlb=force as the old one with that argument by any chance?



Re: [PATCH 09/15] swiotlb: make the swiotlb_init interface more useful

2022-06-01 Thread Christoph Hellwig
Can you send me the full dmesg and the content of
/sys/kernel/debug/swiotlb/io_tlb_nslabs for a good and a bad boot?

Thanks!


Re: [PATCH] MAINTAINERS: refurbish SWIOTLB SUBSYSTEM sections after refactoring

2022-06-01 Thread Christoph Hellwig
On Wed, Jun 01, 2022 at 09:56:13AM +0200, Lukas Bulwahn wrote:
> +F:   arch/x86/kernel/pci-dma.c

I think this file is better left for the x86 maintainers.


Re: [PATCH] dma-debug: Make things less spammy under memory pressure

2022-05-31 Thread Christoph Hellwig
On Tue, May 31, 2022 at 03:19:45PM -0700, Rob Clark wrote:
> um, quite..  tbf that was in the context of a WIP igt test for
> shrinker which was trying to cycle thru ~2x RAM size worth of GEM
> buffers on something like 72 threads.  So it could just be threads
> that had gotten past the dma_debug_disabled() check already before
> global_disable was set to true?
> 
> I guess this could be pr_err_once() instead, then?

Yes, we could use pr_err_once to reduce the chattyness while still
keeping global_disable to disable all the actual tracking.
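
Roughly, against kernel/dma/debug.c (sketch only, surrounding locking
elided):

	if (nr_free_entries == 0) {
		global_disable = true;
		pr_err_once("debugging out of memory - disabling\n");
	}

pr_err_once() still reports the first failure but stops the flood from
threads that raced past the dma_debug_disabled() check.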


[GIT PULL] dma-mapping updates for Linux 5.19

2022-05-25 Thread Christoph Hellwig
The following changes since commit b2d229d4ddb17db541098b83524d901257e93845:

  Linux 5.18-rc3 (2022-04-17 13:57:31 -0700)

are available in the Git repository at:

  git://git.infradead.org/users/hch/dma-mapping.git 
tags/dma-mapping-5.19-2022-05-25

for you to fetch changes up to 4a37f3dd9a83186cb88d44808ab35b78375082c9:

  dma-direct: don't over-decrypt memory (2022-05-23 15:25:40 +0200)

There is a small merge conflict with the (as of now not merged yet) iommu
tree, which removes some code modified in this pull request.  The proper
merge resolution is to still remove the modified code.


dma-mapping updates for Linux 5.19

 - don't over-decrypt memory (Robin Murphy)
 - takes min align mask into account for the swiotlb max mapping size
   (Tianyu Lan)
 - use GFP_ATOMIC in dma-debug (Mikulas Patocka)
 - fix DMA_ATTR_NO_KERNEL_MAPPING on xen/arm (me)
 - don't fail on highmem CMA pages in dma_direct_alloc_pages (me)
 - cleanup swiotlb initialization and share more code with swiotlb-xen
   (me, Stefano Stabellini)

----
Christoph Hellwig (19):
  dma-direct: use is_swiotlb_active in dma_direct_map_page
  swiotlb: make swiotlb_exit a no-op if SWIOTLB_FORCE is set
  swiotlb: simplify swiotlb_max_segment
  swiotlb: rename swiotlb_late_init_with_default_size
  MIPS/octeon: use swiotlb_init instead of open coding it
  x86: remove the IOMMU table infrastructure
  x86: centralize setting SWIOTLB_FORCE when guest memory encryption is 
enabled
  swiotlb: make the swiotlb_init interface more useful
  swiotlb: add a SWIOTLB_ANY flag to lift the low memory restriction
  swiotlb: pass a gfp_mask argument to swiotlb_init_late
  swiotlb: provide swiotlb_init variants that remap the buffer
  swiotlb: merge swiotlb-xen initialization into swiotlb
  swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl
  x86: remove cruft from 
  swiotlb-xen: fix DMA_ATTR_NO_KERNEL_MAPPING on arm
  dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
  swiotlb: don't panic when the swiotlb buffer can't be allocated
  swiotlb: use the right nslabs value in swiotlb_init_remap
  swiotlb: use the right nslabs-derived sizes in swiotlb_init_late

Mikulas Patocka (1):
  dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATOMIC

Robin Murphy (1):
  dma-direct: don't over-decrypt memory

Stefano Stabellini (1):
  arm/xen: don't check for xen_initial_domain() in 
xen_create_contiguous_region

Tianyu Lan (1):
  swiotlb: max mapping size takes min align mask into account

 arch/arm/include/asm/xen/page-coherent.h   |   2 -
 arch/arm/mm/init.c |   6 +-
 arch/arm/xen/mm.c  |  38 ++---
 arch/arm64/include/asm/xen/page-coherent.h |   2 -
 arch/arm64/mm/init.c   |   6 +-
 arch/ia64/include/asm/iommu_table.h|   7 -
 arch/ia64/mm/init.c|   4 +-
 arch/mips/cavium-octeon/dma-octeon.c   |  15 +-
 arch/mips/loongson64/dma.c |   2 +-
 arch/mips/pci/pci-octeon.c |   2 +-
 arch/mips/sibyte/common/dma.c  |   2 +-
 arch/powerpc/include/asm/svm.h |   4 -
 arch/powerpc/include/asm/swiotlb.h |   1 +
 arch/powerpc/kernel/dma-swiotlb.c  |   1 +
 arch/powerpc/mm/mem.c  |   6 +-
 arch/powerpc/platforms/pseries/setup.c |   3 -
 arch/powerpc/platforms/pseries/svm.c   |  26 +---
 arch/riscv/mm/init.c   |   8 +-
 arch/s390/mm/init.c|   3 +-
 arch/x86/include/asm/dma-mapping.h |  12 --
 arch/x86/include/asm/gart.h|   5 +-
 arch/x86/include/asm/iommu.h   |   8 +
 arch/x86/include/asm/iommu_table.h | 102 -
 arch/x86/include/asm/swiotlb.h |  30 
 arch/x86/include/asm/xen/page-coherent.h   |  24 ---
 arch/x86/include/asm/xen/page.h|   5 -
 arch/x86/include/asm/xen/swiotlb-xen.h |   8 +-
 arch/x86/kernel/Makefile   |   2 -
 arch/x86/kernel/amd_gart_64.c  |   5 +-
 arch/x86/kernel/aperture_64.c  |  14 +-
 arch/x86/kernel/cpu/mshyperv.c |   8 -
 arch/x86/kernel/pci-dma.c  | 114 +++---
 arch/x86/kernel/pci-iommu_table.c  |  77 --
 arch/x86/kernel/pci-swiotlb.c  |  77 --
 arch/x86/kernel/tboot.c|   1 -
 arch/x86/kernel/vmlinux.lds.S  |  12 --
 arch/x86/mm/mem_encrypt_amd.c  |   3 -
 arch/x86/pci/sta2x11-fixup.c   |   2 +-
 arch/x86/xen/Makefile  |   2 -
 arch/x86/xen/mmu_pv.c  |   1 +
 arch/x86/xen/pci-swiotlb-xen.c |  96 
 drivers/iommu/amd/i

Re: [PATCH 0/4] DMA mapping changes for SCSI core

2022-05-22 Thread Christoph Hellwig
The whole series looks fine to me.  I'll happily queue it up in the
dma-mapping tree if the SCSI and ATA maintainers are ok with that.



Re: [PATCH] dma-direct: Don't over-decrypt memory

2022-05-22 Thread Christoph Hellwig
Thanks,

applied to the dma-mapping for-next branch.


Re: [PATCH] vfio: Do not manipulate iommu dma_owner for fake iommu groups

2022-05-19 Thread Christoph Hellwig
On Thu, May 19, 2022 at 02:03:48PM -0300, Jason Gunthorpe via iommu wrote:
> Since asserting dma ownership now causes the group to have its DMA blocked
> the iommu layer requires a working iommu. This means the dma_owner APIs
> cannot be used on the fake groups that VFIO creates. Test for this and
> avoid calling them.
> 
> Otherwise asserting dma ownership will fail for VFIO mdev devices as a
> BLOCKING iommu_domain cannot be allocated due to the NULL iommu ops.

Fake iommu groups come back to bite again, part 42...

The patch looks good:

Reviewed-by: Christoph Hellwig 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] iommu/dma: Add config for PCI SAC address trick

2022-05-19 Thread Christoph Hellwig
On Wed, May 18, 2022 at 06:36:59PM +0100, Robin Murphy wrote:
> +config IOMMU_DMA_PCI_SAC_OPT
> + bool "Enable 64-bit legacy PCI optimisation by default"
> + depends on IOMMU_DMA
> + default X86
> + help
> +   Enable by default an IOMMU optimisation for 64-bit legacy PCI devices,
> +   wherein the DMA API layer will always first try to allocate a 32-bit
> +   DMA address suitable for a single address cycle, before falling back
> +   to allocating from the full usable address range. If your system has
> +   64-bit legacy PCI devices in 32-bit slots where using dual address
> +   cycles reduces DMA throughput significantly, this optimisation may be
> +   beneficial to overall performance.

The config option name sounds odd.  Yes, maybe for actual 64-bit PCI
this actually is an optimization.  But I'd think of it more as a
workaround, and I'd probably word it as such.  I also would not
default to true for x86, just allow for that.  There is nothing
fundamental about x86 wanting that, just that people use more crap
drivers on x86.  And the fact that AMD SEV sets the high bit for
encrypted memory has been weeding out at least some of them.

> +bool iommu_dma_forcedac __read_mostly = 
> !IS_ENABLED(CONFIG_IOMMU_DMA_PCI_SAC_OPT);

Overly long line here.
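
E.g. just breaking after the '=' would keep it under 80 columns
(sketch only, not compile-tested):

	/* same initializer as quoted above, just wrapped */
	bool iommu_dma_forcedac __read_mostly =
		!IS_ENABLED(CONFIG_IOMMU_DMA_PCI_SAC_OPT);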
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v4 2/6] iommu: Add a helper to do PASID lookup from domain

2022-05-18 Thread Christoph Hellwig
On Wed, May 18, 2022 at 11:21:16AM -0700, Jacob Pan wrote:
> +ioasid_t iommu_get_pasid_from_domain(struct device *dev, struct iommu_domain 
> *domain)

Overly long line here.
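
E.g. wrapping at the parameter list is enough (sketch, assuming no
other changes to the declaration):

	ioasid_t iommu_get_pasid_from_domain(struct device *dev,
					     struct iommu_domain *domain);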
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC PATCH] dma-iommu: Add iommu_dma_max_mapping_size()

2022-05-18 Thread Christoph Hellwig
On Tue, May 17, 2022 at 01:02:00PM +0100, Robin Murphy wrote:
>> So how to inform the SCSI driver of this caching limit then so that it may 
>> limit the SGL length?
>
> Driver-specific mechanism; block-layer-specific mechanism; redefine this 
> whole API to something like dma_opt_mapping_size(), as a limit above which 
> mappings might become less efficient or start to fail (callback to my 
> thoughts on [1] as well, I suppose); many options. Just not imposing a 
> ridiculously low *maximum* on everyone wherein mapping calls "should not be 
> larger than the returned value" when that's clearly bollocks.

Well, for swiotlb it is a hard limit.  So if we want to go down that
route we need two APIs, one for the optimal size and one for the
hard limit.
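
I.e. keep the existing helper as the hard limit and add a second one
for the soft limit — roughly this shape (the "opt" name is purely
illustrative, nothing like it exists yet):

	/* soft limit: mappings above this may be slower, but succeed */
	size_t dma_opt_mapping_size(struct device *dev);

	/* hard limit: mappings above this can fail (e.g. swiotlb slots) */
	size_t dma_max_mapping_size(struct device *dev);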
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC PATCH] dma-iommu: Add iommu_dma_max_mapping_size()

2022-05-18 Thread Christoph Hellwig
On Tue, May 17, 2022 at 11:40:52AM +0100, Robin Murphy wrote:
> Indeed, sorry but NAK for this being nonsense. As I've said at least once 
> before, if the unnecessary SAC address allocation attempt slows down your 
> workload, make it not do that in the first place. If you don't like the 
> existing command-line parameter then fine, there are plenty of other 
> options, it just needs to be done in a way that doesn't break x86 systems 
> with dodgy firmware, as my first attempt turned out to.

What broke x86?
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] swiotlb: Max mapping size takes min align mask into account

2022-05-17 Thread Christoph Hellwig
Thanks,

applied to the dma-mapping for-next tree.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC PATCH] dma-iommu: Add iommu_dma_max_mapping_size()

2022-05-17 Thread Christoph Hellwig
On Tue, May 17, 2022 at 10:02:00AM +0100, John Garry wrote:
> BTW, on a separate topic, I noticed that even with this change my ATA 
> devices have max_hw_sectors_kb of 32767, as opposed to 128 for SAS devices. 
> It seems that libata-scsi - specifically ata_scsi_dev_config() - doesn't 
> honour the shost max_sectors limit. I guess that is not intentional.

I don't think it is.  The libsas/libata integration is a bit messy
sometimes...
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC PATCH] dma-iommu: Add iommu_dma_max_mapping_size()

2022-05-17 Thread Christoph Hellwig
On Mon, May 16, 2022 at 09:06:01PM +0800, John Garry wrote:
> For streaming DMA mappings involving an IOMMU and whose IOVA len regularly
> exceeds the IOVA rcache upper limit (meaning that they are not cached),
> performance can be reduced.
> 
> Add the IOMMU callback for DMA mapping API dma_max_mapping_size(), which
> allows the drivers to know the mapping limit and thus limit the requested 
> IOVA lengths.
> 
> This resolves the performance issue originally reported in [0] for a SCSI
> HBA driver which was regularly mapping SGLs which required IOVAs in
> excess of the IOVA caching limit. In this case the block layer limits the
> max sectors per request - as configured in __scsi_init_queue() - which
> will limit the total SGL length the driver tries to map and in turn limits
> IOVA lengths requested.
> 
> [0] 
> https://lore.kernel.org/linux-iommu/20210129092120.1482-1-thunder.leiz...@huawei.com/
> 
> Signed-off-by: John Garry 
> ---
> Sending as an RFC as iommu_dma_max_mapping_size() is a soft limit, and not
> a hard limit which I expect is the semantics of dma_map_ops.max_mapping_size
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 09f6e1c0f9c0..e2d5205cde37 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -1442,6 +1442,21 @@ static unsigned long 
> iommu_dma_get_merge_boundary(struct device *dev)
>   return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
>  }
>  

> + if (!domain)
> + return 0;
> +
> + cookie = domain->iova_cookie;
> + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
> + return 0;

Can these conditions even be true here?

> +static inline unsigned long iova_rcache_range(void)
> +{
> + return 0;
> +}

Given that IOMMU_DMA select IOMMU_IOVA there is no need for this stub.

Otherwise this looks sensible to me.
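
If the domain/cookie checks really can't fire and the stub goes away,
the callback could collapse to something like this (untested sketch
under those assumptions):

	static size_t iommu_dma_max_mapping_size(struct device *dev)
	{
		/* IOVAs longer than the rcache limit are never cached */
		return iova_rcache_range();
	}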
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] iommu/dma: Fix check for error return from iommu_map_sg_atomic()

2022-05-17 Thread Christoph Hellwig
On Fri, May 13, 2022 at 05:39:48PM +0200, Niklas Schnelle wrote:
> In __iommu_dma_alloc_noncontiguous() the value returned by
> iommu_map_sg_atomic() is checked for being smaller than size. Before
> commit ad8f36e4b6b1 ("iommu: return full error code from
> iommu_map_sg[_atomic]()") this simply checked if the requested size was
> successfully mapped.
> 
> After that commit iommu_map_sg_atomic() may also return a negative
> error value. In principle this too would be covered by the existing
> check. There is one problem however, as size is of type size_t while the
> return type of iommu_map_sg_atomic() is now of type ssize_t the latter gets
> converted to size_t and negative error values end up as very large
> positive values making the check succeed. Fix this by making the return
> type visible with a local variable and add an explicit cast to ssize_t.
> 
> Fixes: ad8f36e4b6b1 ("iommu: return full error code from 
> iommu_map_sg[_atomic]()")
> Cc: sta...@vger.kernel.org
> Signed-off-by: Niklas Schnelle 

I don't see what the point of the newly added local variable is here.
Just casting size should be all that is needed as far as I can tell.
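
I.e. something like this should do (sketch, modulo the exact variable
names at that call site):

	/* compare in the signed domain so errors stay negative */
	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents,
			ioprot) < (ssize_t)size)
		goto out_free_sg;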
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [RFC PATCH V2 1/2] swiotlb: Add Child IO TLB mem support

2022-05-16 Thread Christoph Hellwig
I don't really understand how 'childs' fit in here.  The code also
doesn't seem to be usable without patch 2 and a caller of the
new functions added in patch 2, so it is rather impossible to review.

Also:

 1) why is SEV/TDX so different from other cases that need bounce
buffering to treat it different and we can't work on a general
scalability improvement
 2) per previous discussions at how swiotlb itself works, it is
clear that another option is to just make pages we DMA to
shared with the hypervisor.  Why don't we try that at least
for larger I/O?
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 2/5] iommu: Add blocking_domain_ops field in iommu_ops

2022-05-16 Thread Christoph Hellwig
On Mon, May 16, 2022 at 09:57:56AM +0800, Lu Baolu wrote:
> Each IOMMU driver must provide a blocking domain ops. If the hardware
> supports detaching domain from device, setting blocking domain equals
> detaching the existing domain from the deivce. Otherwise, an UNMANAGED
> domain without any mapping will be used instead.

blocking in this case means not allowing any access?  The naming
sounds a bit odd to me as blocking in the kernel has a specific
meaning.  Maybe something like noaccess ops might be a better name?
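
Whatever the name ends up being, per the quoted description the
fallback for hardware without a real detach is presumably just an
UNMANAGED domain that never gets any mappings — roughly (illustrative
only, not taken from the patch; the function name is made up):

	/* no mappings are ever installed, so all DMA through it fails */
	static struct iommu_domain *alloc_noaccess_domain(struct bus_type *bus)
	{
		return iommu_domain_alloc(bus);
	}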
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] vfio: Remove VFIO_TYPE1_NESTING_IOMMU

2022-05-15 Thread Christoph Hellwig
Looks good,

Reviewed-by: Christoph Hellwig 

we really should not keep dead code like this around.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 0/7] iommu/vt-d: Make intel-iommu.h private

2022-05-15 Thread Christoph Hellwig
On Sat, May 14, 2022 at 09:43:15AM +0800, Lu Baolu wrote:
> Hi folks,
> 
> The include/linux/intel-iommu.h should be private to the Intel IOMMU
> driver. Other drivers or components should interact with the IOMMU
> drivers through the kAPIs provided by the iommu core.
> 
> This series cleanups all includes of intel-iommu.h outside of the Intel
> IOMMU driver and move this header from include/linux to
> drivers/iommu/intel/.
> 
> No functional changes intended. Please help to review and suggest.

Thanks, this was long overdue!

The series looks good to me:

Reviewed-by: Christoph Hellwig 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] swiotlb-xen: fix DMA_ATTR_NO_KERNEL_MAPPING on arm

2022-05-11 Thread Christoph Hellwig
On Fri, Apr 29, 2022 at 04:15:38PM -0700, Stefano Stabellini wrote:
> Great! Christoph you can go ahead and pick it up in your tree if you are
> up for it.

The patch is in the dma-mapping for-next branch now:

http://git.infradead.org/users/hch/dma-mapping.git/commitdiff/62cb1ca1654b57589c582efae2748159c74ee356

There were a few smaller merge conflicts with the swiotlb refactoring.
I think everything is fine, but please take another look if possible.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 3/3] swiotlb: use the right nslabs-derived sizes in swiotlb_init_late

2022-05-11 Thread Christoph Hellwig
nslabs can shrink when allocations or the remap don't succeed, so make
sure to use it for all sizing.  For that remove the bytes value that
can get stale and replace it with local calculations and a boolean to
indicate if the originally requested size could not be allocated.

Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and 
swiotlb_init_late_with_tbl")
Signed-off-by: Christoph Hellwig 
---
 kernel/dma/swiotlb.c | 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 113e1e8aaca37..d6e62a6a42ceb 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -297,9 +297,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-   unsigned long bytes;
unsigned char *vstart = NULL;
unsigned int order;
+   bool retried = false;
int rc = 0;
 
if (swiotlb_force_disable)
@@ -308,7 +308,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 retry:
order = get_order(nslabs << IO_TLB_SHIFT);
nslabs = SLABS_PER_PAGE << order;
-   bytes = nslabs << IO_TLB_SHIFT;
 
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
@@ -316,16 +315,13 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
if (vstart)
break;
order--;
+   nslabs = SLABS_PER_PAGE << order;
+   retried = true;
}
 
if (!vstart)
return -ENOMEM;
 
-   if (order != get_order(bytes)) {
-   pr_warn("only able to allocate %ld MB\n",
-   (PAGE_SIZE << order) >> 20);
-   nslabs = SLABS_PER_PAGE << order;
-   }
if (remap)
rc = remap(vstart, nslabs);
if (rc) {
@@ -334,9 +330,15 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
if (nslabs < IO_TLB_MIN_SLABS)
return rc;
+   retried = true;
goto retry;
}
 
+   if (retried) {
+   pr_warn("only able to allocate %ld MB\n",
+   (PAGE_SIZE << order) >> 20);
+   }
+
mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(array_size(sizeof(*mem->slots), nslabs)));
if (!mem->slots) {
@@ -344,7 +346,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
return -ENOMEM;
}
 
-   set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+   set_memory_decrypted((unsigned long)vstart,
+(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
swiotlb_print_info();
-- 
2.30.2
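
(For context, a hypothetical late caller — the size and flags here are
made up — just checks the return value and lives with a possibly
shrunken buffer:)

	/* request 64 MB after boot, no remap callback needed */
	if (swiotlb_init_late(64UL << 20, GFP_KERNEL, NULL))
		pr_warn("swiotlb late init failed\n");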

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

