Re: [PATCH v8 00/14] Convert powerpc to default topdown mmap layout (v8)

2022-04-03 Thread Christophe Leroy
Hi Andrew,

On 11/03/2022 at 05:49, Andrew Morton wrote:
> On Fri, 11 Mar 2022 15:26:42 +1100 Michael Ellerman wrote:
> 
>>> What will be the merge strategy ? I guess it's a bit late to get it
>>> through powerpc tree, so I was just wondering whether we could get
>>> patches 2 to 5 in mm this cycle, and the powerpc ones next cycle ?
>>
>> Yeah I didn't pick it up because the mm changes don't have many acks and
>> I'm always nervous about carrying generic mm changes.
>>
>> It would be my preference if Andrew could take 2-5 through mm for v5.18,
>> but it is quite late, so I'm not sure how he will feel about that.
> 
> 5.18 isn't a problem.  Perhaps you meant 5.17, which would be real tough.
> 
> Can we take a look after 5.18-rc1?

5.18-rc1 was released last night.

Can you take patches 2-5 as they are, or do you prefer that I resend them to 
you as a standalone series?

Thanks
Christophe

[PATCH 15/15] x86: remove cruft from <asm/dma-mapping.h>

2022-04-03 Thread Christoph Hellwig
<asm/dma-mapping.h> gets pulled in by all drivers using the DMA API.
Remove x86 internal variables and unnecessary includes from it.

Signed-off-by: Christoph Hellwig 
---
 arch/x86/include/asm/dma-mapping.h | 11 ---
 arch/x86/include/asm/iommu.h   |  2 ++
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 256fd8115223d..1c66708e30623 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,17 +2,6 @@
 #ifndef _ASM_X86_DMA_MAPPING_H
 #define _ASM_X86_DMA_MAPPING_H
 
-/*
- * IOMMU interface. See Documentation/core-api/dma-api-howto.rst and
- * Documentation/core-api/dma-api.rst for documentation.
- */
-
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-
-extern int iommu_merge;
-extern int panic_on_overflow;
-
 extern const struct dma_map_ops *dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index dba89ed40d38d..0bef44d30a278 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -8,6 +8,8 @@
 
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int iommu_merge;
+extern int panic_on_overflow;
 
 #ifdef CONFIG_SWIOTLB
 extern bool x86_swiotlb_enable;
-- 
2.30.2



[PATCH 14/15] swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl

2022-04-03 Thread Christoph Hellwig
No users left.

Signed-off-by: Christoph Hellwig 
---
 include/linux/swiotlb.h |  2 --
 kernel/dma/swiotlb.c| 77 +++--
 2 files changed, 20 insertions(+), 59 deletions(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7b50c82f84ce9..7ed35dd3de6e7 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,13 +34,11 @@ struct scatterlist;
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs));
 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
int (*remap)(void *tlb, unsigned long nslabs));
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d5fe8f5e08300..c54fc40ebb493 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -225,33 +225,6 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
return;
 }
 
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
-   unsigned int flags)
-{
-   struct io_tlb_mem *mem = &io_tlb_default_mem;
-   size_t alloc_size;
-
-   if (swiotlb_force_disable)
-   return 0;
-
-   /* protect against double initialization */
-   if (WARN_ON_ONCE(mem->nslabs))
-   return -ENOMEM;
-
-   alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
-   mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-   if (!mem->slots)
-   panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
- __func__, alloc_size, PAGE_SIZE);
-
-   swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-   mem->force_bounce = flags & SWIOTLB_FORCE;
-
-   if (flags & SWIOTLB_VERBOSE)
-   swiotlb_print_info();
-   return 0;
-}
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -259,7 +232,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
 {
+   struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = default_nslabs;
+   size_t alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
size_t bytes;
void *tlb;
 
@@ -280,7 +255,8 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
else
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
if (!tlb)
-   goto fail;
+   panic("%s: failed to allocate tlb structure\n", __func__);
+
if (remap && remap(tlb, nslabs) < 0) {
memblock_free(tlb, PAGE_ALIGN(bytes));
 
@@ -290,14 +266,17 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
  __func__, bytes);
goto retry;
}
-   if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
-   goto fail_free_mem;
-   return;
 
-fail_free_mem:
-   memblock_free(tlb, bytes);
-fail:
-   pr_warn("Cannot allocate buffer");
+   mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
+   if (!mem->slots)
+   panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__, alloc_size, PAGE_SIZE);
+
+   swiotlb_init_io_tlb_mem(mem, __pa(tlb), default_nslabs, false);
+   mem->force_bounce = flags & SWIOTLB_FORCE;
+
+   if (flags & SWIOTLB_VERBOSE)
+   swiotlb_print_info();
 }
 
 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
@@ -313,6 +292,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
int (*remap)(void *tlb, unsigned long nslabs))
 {
+   struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned long bytes;
unsigned char *vstart = NULL;
@@ -353,33 +333,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
return rc;
goto retry;
}
-   rc = swiotlb_late_init_with_tbl(vstart, nslabs);
-   if (rc)
-   free_pages((unsigned long)vstart, order);
-
-   return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-   struct io_tlb_mem *mem = &io_tlb_default_mem;
-   unsigned long bytes = nslabs << IO_TLB_SHIFT;
-
-   if 

[PATCH 13/15] swiotlb: merge swiotlb-xen initialization into swiotlb

2022-04-03 Thread Christoph Hellwig
Reuse the generic swiotlb initialization for xen-swiotlb.  For ARM/ARM64
this works trivially, while for x86 xen_swiotlb_fixup needs to be passed
as the remap argument to swiotlb_init_remap/swiotlb_init_late.

Note that the lower bound of the swiotlb size is changed to the smaller
IO_TLB_MIN_SLABS based value with this patch, but that is fine as the
2MB value used in Xen before was just an optimization and is not the
hard lower bound.
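
For reference, a condensed sketch of how the x86 Xen path ends up wiring the
remap callback into the generic initialization (distilled from the pci-dma.c
hunk below, not a verbatim copy):

	static void __init pci_xen_swiotlb_init(void)
	{
		if (!xen_initial_domain() && !x86_swiotlb_enable)
			return;
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_ANY;
		/* xen_swiotlb_fixup remaps the buffer for Xen */
		swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
		dma_ops = &xen_swiotlb_dma_ops;
	}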

Signed-off-by: Christoph Hellwig 
Reviewed-by: Stefano Stabellini 
---
 arch/arm/xen/mm.c   |  21 +++---
 arch/x86/include/asm/xen/page.h |   5 --
 arch/x86/kernel/pci-dma.c   |  20 ++---
 drivers/xen/swiotlb-xen.c   | 128 +---
 include/xen/arm/page.h  |   1 -
 include/xen/swiotlb-xen.h   |   8 +-
 6 files changed, 28 insertions(+), 155 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 28c2070602535..ff05a7899cb86 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -23,22 +23,20 @@
 #include 
 #include 
 
-unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+static gfp_t xen_swiotlb_gfp(void)
 {
phys_addr_t base;
-   gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
u64 i;
 
for_each_mem_range(i, &base, NULL) {
if (base < (phys_addr_t)0xffffffff) {
if (IS_ENABLED(CONFIG_ZONE_DMA32))
-   flags |= __GFP_DMA32;
-   else
-   flags |= __GFP_DMA;
-   break;
+   return __GFP_DMA32;
+   return __GFP_DMA;
}
}
-   return __get_free_pages(flags, order);
+
+   return GFP_KERNEL;
 }
 
 static bool hypercall_cflush = false;
@@ -140,10 +138,13 @@ static int __init xen_mm_init(void)
if (!xen_swiotlb_detect())
return 0;
 
-   rc = xen_swiotlb_init();
/* we can work with the default swiotlb */
-   if (rc < 0 && rc != -EEXIST)
-   return rc;
+   if (!io_tlb_default_mem.nslabs) {
+   rc = swiotlb_init_late(swiotlb_size_or_default(),
+  xen_swiotlb_gfp(), NULL);
+   if (rc < 0)
+   return rc;
+   }
 
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index e989bc2269f54..1fc67df500145 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
return false;
 }
 
-static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
-{
-   return __get_free_pages(__GFP_NOWARN, order);
-}
-
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index a705a199bf8a3..30bbe4abb5d61 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -72,15 +72,13 @@ static inline void __init pci_swiotlb_detect(void)
 #endif /* CONFIG_SWIOTLB */
 
 #ifdef CONFIG_SWIOTLB_XEN
-static bool xen_swiotlb;
-
 static void __init pci_xen_swiotlb_init(void)
 {
if (!xen_initial_domain() && !x86_swiotlb_enable)
return;
x86_swiotlb_enable = true;
-   xen_swiotlb = true;
-   xen_swiotlb_init_early();
+   x86_swiotlb_flags |= SWIOTLB_ANY;
+   swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
dma_ops = &xen_swiotlb_dma_ops;
if (IS_ENABLED(CONFIG_PCI))
pci_request_acs();
@@ -88,14 +86,16 @@ static void __init pci_xen_swiotlb_init(void)
 
 int pci_xen_swiotlb_init_late(void)
 {
-   int rc;
-
-   if (xen_swiotlb)
+   if (dma_ops == &xen_swiotlb_dma_ops)
return 0;
 
-   rc = xen_swiotlb_init();
-   if (rc)
-   return rc;
+   /* we can work with the default swiotlb */
+   if (!io_tlb_default_mem.nslabs) {
+   int rc = swiotlb_init_late(swiotlb_size_or_default(),
+  GFP_KERNEL, xen_swiotlb_fixup);
+   if (rc < 0)
+   return rc;
+   }
 
/* XXX: this switches the dma ops under live devices! */
dma_ops = &xen_swiotlb_dma_ops;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index c2da3eb4826e8..df8085b50df10 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
return 0;
 }
 
-static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -130,132 +130,6 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
return 0;
 }
 
-enum xen_swiotlb_err {
-   

[PATCH 12/15] swiotlb: provide swiotlb_init variants that remap the buffer

2022-04-03 Thread Christoph Hellwig
To share more code between swiotlb and xen-swiotlb, offer a
swiotlb_init_remap interface and add a remap callback to
swiotlb_init_late that will allow Xen to remap the buffer
without duplicating much of the logic.
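
As a rough illustration (not part of this patch), a platform with special
placement requirements could hook the allocation like this; my_remap and
my_platform_swiotlb_setup are invented names and the bodies only sketch the
contract:

	/* Return 0 if the buffer at tlb is usable; a negative value makes
	 * the core free the buffer and retry with a smaller nslabs. */
	static int __init my_remap(void *tlb, unsigned long nslabs)
	{
		return 0;
	}

	void __init my_platform_swiotlb_setup(void)
	{
		swiotlb_init_remap(true, SWIOTLB_VERBOSE, my_remap);
	}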

Signed-off-by: Christoph Hellwig 
---
 arch/x86/pci/sta2x11-fixup.c |  2 +-
 include/linux/swiotlb.h  |  5 -
 kernel/dma/swiotlb.c | 36 +---
 3 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index c7e6faf59a861..7368afc039987 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
int size = STA2X11_SWIOTLB_SIZE;
/* First instance: register your own swiotlb area */
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-   if (swiotlb_init_late(size, GFP_DMA))
+   if (swiotlb_init_late(size, GFP_DMA, NULL))
dev_emerg(&pdev->dev, "init swiotlb failed\n");
}
list_add(&instance->list, &sta2x11_instance_list);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ee655f2e4d28b..7b50c82f84ce9 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -36,8 +36,11 @@ struct scatterlist;
 
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+   int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+   int (*remap)(void *tlb, unsigned long nslabs));
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-int swiotlb_init_late(size_t size, gfp_t gfp_mask);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 119187afc65ec..d5fe8f5e08300 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+   int (*remap)(void *tlb, unsigned long nslabs))
 {
-   size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+   unsigned long nslabs = default_nslabs;
+   size_t bytes;
void *tlb;
 
if (!addressing_limit && !swiotlb_force_bounce)
@@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 * allow to pick a location everywhere for hypervisors with guest
 * memory encryption.
 */
+retry:
+   bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
if (flags & SWIOTLB_ANY)
tlb = memblock_alloc(bytes, PAGE_SIZE);
else
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
if (!tlb)
goto fail;
+   if (remap && remap(tlb, nslabs) < 0) {
+   memblock_free(tlb, PAGE_ALIGN(bytes));
+
+   nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+   if (nslabs < IO_TLB_MIN_SLABS)
+   panic("%s: Failed to remap %zu bytes\n",
+ __func__, bytes);
+   goto retry;
+   }
if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
goto fail_free_mem;
return;
@@ -287,12 +300,18 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
pr_warn("Cannot allocate buffer");
 }
 
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+{
+   return swiotlb_init_remap(addressing_limit, flags, NULL);
+}
+
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int swiotlb_init_late(size_t size, gfp_t gfp_mask)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+   int (*remap)(void *tlb, unsigned long nslabs))
 {
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned long bytes;
@@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
if (swiotlb_force_disable)
return 0;
 
+retry:
order = get_order(nslabs << IO_TLB_SHIFT);
nslabs = SLABS_PER_PAGE << order;
bytes = nslabs << IO_TLB_SHIFT;
@@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
(PAGE_SIZE << order) >> 20);
nslabs = SLABS_PER_PAGE << order;
}
+   if 

[PATCH 11/15] swiotlb: pass a gfp_mask argument to swiotlb_init_late

2022-04-03 Thread Christoph Hellwig
Let the caller choose a zone to allocate from.  This will be used
later on by the xen-swiotlb initialization on arm.
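
For example (illustrative calls only, the size is made up), the sta2x11 user
below keeps its old low-memory behaviour while a caller without addressing
limits can now relax the zone:

	swiotlb_init_late(8 << 20, GFP_DMA);     /* low pages, as before */
	swiotlb_init_late(8 << 20, GFP_KERNEL);  /* no zone restriction */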

Signed-off-by: Christoph Hellwig 
Reviewed-by: Anshuman Khandual 
---
 arch/x86/pci/sta2x11-fixup.c | 2 +-
 include/linux/swiotlb.h  | 2 +-
 kernel/dma/swiotlb.c | 7 ++-
 3 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index e0c039a75b2db..c7e6faf59a861 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
int size = STA2X11_SWIOTLB_SIZE;
/* First instance: register your own swiotlb area */
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-   if (swiotlb_init_late(size))
+   if (swiotlb_init_late(size, GFP_DMA))
dev_emerg(&pdev->dev, "init swiotlb failed\n");
}
list_add(&instance->list, &sta2x11_instance_list);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index eabdd89987027..ee655f2e4d28b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -37,7 +37,7 @@ struct scatterlist;
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-int swiotlb_init_late(size_t size);
+int swiotlb_init_late(size_t size, gfp_t gfp_mask);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f6e091424af3d..119187afc65ec 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -292,7 +292,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int swiotlb_init_late(size_t size)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 {
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned long bytes;
@@ -303,15 +303,12 @@ int swiotlb_init_late(size_t size)
if (swiotlb_force_disable)
return 0;
 
-   /*
-* Get IO TLB memory from the low pages
-*/
order = get_order(nslabs << IO_TLB_SHIFT);
nslabs = SLABS_PER_PAGE << order;
bytes = nslabs << IO_TLB_SHIFT;
 
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-   vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+   vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
  order);
if (vstart)
break;
-- 
2.30.2



[PATCH 09/15] swiotlb: make the swiotlb_init interface more useful

2022-04-03 Thread Christoph Hellwig
Pass a bool to indicate whether swiotlb needs to be enabled based on
the addressing needs, and replace the verbose argument with a set of
flags, including one to force enable bounce buffering.

Note that this patch removes the ability to force xen-swiotlb
use with swiotlb=force on the command line on x86 (arm and arm64
never supported that), but this interface will be restored shortly.
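
A sketch of the resulting calling convention, condensed from the hunks below:

	swiotlb_init(true, SWIOTLB_VERBOSE);	/* always enable, print info */
	swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
						/* only if needed for addressing */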

Signed-off-by: Christoph Hellwig 
---
 arch/arm/mm/init.c |  6 +
 arch/arm64/mm/init.c   |  6 +
 arch/ia64/mm/init.c|  4 +--
 arch/mips/cavium-octeon/dma-octeon.c   |  2 +-
 arch/mips/loongson64/dma.c |  2 +-
 arch/mips/sibyte/common/dma.c  |  2 +-
 arch/powerpc/mm/mem.c  |  3 ++-
 arch/powerpc/platforms/pseries/setup.c |  3 ---
 arch/riscv/mm/init.c   |  8 +-
 arch/s390/mm/init.c|  3 +--
 arch/x86/kernel/pci-dma.c  | 15 ++-
 drivers/xen/swiotlb-xen.c  |  4 +--
 include/linux/swiotlb.h| 15 ++-
 include/trace/events/swiotlb.h | 29 -
 kernel/dma/swiotlb.c   | 35 ++
 15 files changed, 55 insertions(+), 82 deletions(-)

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fe249ea919083..ce64bdb55a16b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -271,11 +271,7 @@ static void __init free_highpages(void)
 void __init mem_init(void)
 {
 #ifdef CONFIG_ARM_LPAE
-   if (swiotlb_force == SWIOTLB_FORCE ||
-   max_pfn > arm_dma_pfn_limit)
-   swiotlb_init(1);
-   else
-   swiotlb_force = SWIOTLB_NO_FORCE;
+   swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
 #endif
 
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8ac25f19084e8..7b6ea4d6733d6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -398,11 +398,7 @@ void __init bootmem_init(void)
  */
 void __init mem_init(void)
 {
-   if (swiotlb_force == SWIOTLB_FORCE ||
-   max_pfn > PFN_DOWN(arm64_dma_phys_limit))
-   swiotlb_init(1);
-   else if (!xen_swiotlb_detect())
-   swiotlb_force = SWIOTLB_NO_FORCE;
+   swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
 
/* this will put all unused low memory onto the freelists */
memblock_free_all();
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 5d165607bf354..3c3e15b22608f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -437,9 +437,7 @@ mem_init (void)
if (iommu_detected)
break;
 #endif
-#ifdef CONFIG_SWIOTLB
-   swiotlb_init(1);
-#endif
+   swiotlb_init(true, SWIOTLB_VERBOSE);
} while (0);
 
 #ifdef CONFIG_FLATMEM
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index fb7547e217263..9fbba6a8fa4c5 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -235,5 +235,5 @@ void __init plat_swiotlb_setup(void)
 #endif
 
swiotlb_adjust_size(swiotlbsize);
-   swiotlb_init(1);
+   swiotlb_init(true, SWIOTLB_VERBOSE);
 }
diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c
index 364f2f27c8723..8220a1bc0db64 100644
--- a/arch/mips/loongson64/dma.c
+++ b/arch/mips/loongson64/dma.c
@@ -24,5 +24,5 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 void __init plat_swiotlb_setup(void)
 {
-   swiotlb_init(1);
+   swiotlb_init(true, SWIOTLB_VERBOSE);
 }
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
index eb47a94f3583e..c5c2c782aff68 100644
--- a/arch/mips/sibyte/common/dma.c
+++ b/arch/mips/sibyte/common/dma.c
@@ -10,5 +10,5 @@
 
 void __init plat_swiotlb_setup(void)
 {
-   swiotlb_init(1);
+   swiotlb_init(true, SWIOTLB_VERBOSE);
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8e301cd8925b2..e1519e2edc656 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 
+#include <asm/swiotlb.h>
 #include 
 #include 
 #include 
@@ -251,7 +252,7 @@ void __init mem_init(void)
if (is_secure_guest())
svm_swiotlb_init();
else
-   swiotlb_init(0);
+   swiotlb_init(ppc_swiotlb_enable, 0);
 #endif
 
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 069d7b3bb142e..c6e06d91b6602 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -838,9 +838,6 @@ static void __init pSeries_setup_arch(void)
}
 
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
-   if (swiotlb_force == SWIOTLB_FORCE)
-   ppc_swiotlb_enable 

[PATCH 10/15] swiotlb: add a SWIOTLB_ANY flag to lift the low memory restriction

2022-04-03 Thread Christoph Hellwig
Power SVM wants to allocate a swiotlb buffer that is not restricted to
low memory for the trusted hypervisor scheme.  Consolidate the support
for this into the swiotlb_init interface by adding a new flag.
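
Condensed from the hunks below, the SVM path now simply requests an
unrestricted, forced buffer through the generic interface:

	/* pseries SVM setup (sketch of the resulting flow) */
	ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
	/* ... and later, from mem_init(): */
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);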

Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/include/asm/svm.h   |  4 
 arch/powerpc/include/asm/swiotlb.h   |  1 +
 arch/powerpc/kernel/dma-swiotlb.c|  1 +
 arch/powerpc/mm/mem.c|  5 +
 arch/powerpc/platforms/pseries/svm.c | 26 +-
 include/linux/swiotlb.h  |  1 +
 kernel/dma/swiotlb.c | 11 +--
 7 files changed, 14 insertions(+), 35 deletions(-)

diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
index 7546402d796af..85580b30aba48 100644
--- a/arch/powerpc/include/asm/svm.h
+++ b/arch/powerpc/include/asm/svm.h
@@ -15,8 +15,6 @@ static inline bool is_secure_guest(void)
return mfmsr() & MSR_S;
 }
 
-void __init svm_swiotlb_init(void);
-
 void dtl_cache_ctor(void *addr);
 #define get_dtl_cache_ctor()   (is_secure_guest() ? dtl_cache_ctor : NULL)
 
@@ -27,8 +25,6 @@ static inline bool is_secure_guest(void)
return false;
 }
 
-static inline void svm_swiotlb_init(void) {}
-
 #define get_dtl_cache_ctor() NULL
 
 #endif /* CONFIG_PPC_SVM */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 3c1a1cd161286..4203b5e0a88ed 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -9,6 +9,7 @@
 #include 
 
 extern unsigned int ppc_swiotlb_enable;
+extern unsigned int ppc_swiotlb_flags;
 
 #ifdef CONFIG_SWIOTLB
 void swiotlb_detect_4g(void);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index fc7816126a401..ba256c37bcc0f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -10,6 +10,7 @@
 #include 
 
 unsigned int ppc_swiotlb_enable;
+unsigned int ppc_swiotlb_flags;
 
 void __init swiotlb_detect_4g(void)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e1519e2edc656..a4d65418c30a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,10 +249,7 @@ void __init mem_init(void)
 * back to top-down.
 */
memblock_set_bottom_up(true);
-   if (is_secure_guest())
-   svm_swiotlb_init();
-   else
-   swiotlb_init(ppc_swiotlb_enable, 0);
+   swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
 #endif
 
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index c5228f4969eb2..3b4045d508ec8 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -28,7 +28,7 @@ static int __init init_svm(void)
 * need to use the SWIOTLB buffer for DMA even if dma_capable() says
 * otherwise.
 */
-   swiotlb_force = SWIOTLB_FORCE;
+   ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
 
/* Share the SWIOTLB buffer with the host. */
swiotlb_update_mem_attributes();
@@ -37,30 +37,6 @@ static int __init init_svm(void)
 }
 machine_early_initcall(pseries, init_svm);
 
-/*
- * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
- * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
- * any addressing limitation, we don't need to allocate it in low addresses.
- */
-void __init svm_swiotlb_init(void)
-{
-   unsigned char *vstart;
-   unsigned long bytes, io_tlb_nslabs;
-
-   io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
-   io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-
-   bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-   vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
-   if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
-   return;
-
-
-   memblock_free(vstart, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-   panic("SVM: Cannot allocate SWIOTLB buffer");
-}
-
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ae0407173e845..eabdd89987027 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -15,6 +15,7 @@ struct scatterlist;
 
 #define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
 #define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
+#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */
 
 /*
  * Maximum allowable number of contiguous slabs to map,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 86e877a96b828..f6e091424af3d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -266,8 +266,15 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
if (swiotlb_force_disable)
return;
 
-   /* Get IO TLB 

[PATCH 08/15] x86: centralize setting SWIOTLB_FORCE when guest memory encryption is enabled

2022-04-03 Thread Christoph Hellwig
Move enabling SWIOTLB_FORCE for guest memory encryption into common code.

Signed-off-by: Christoph Hellwig 
---
 arch/x86/kernel/cpu/mshyperv.c | 8 
 arch/x86/kernel/pci-dma.c  | 8 
 arch/x86/mm/mem_encrypt_amd.c  | 3 ---
 3 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 4b67094215bba..5b8f2c3571601 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -337,14 +337,6 @@ static void __init ms_hyperv_init_platform(void)
swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;
 #endif
}
-
-#ifdef CONFIG_SWIOTLB
-   /*
-* Enable swiotlb force mode in Isolation VM to
-* use swiotlb bounce buffer for dma transaction.
-*/
-   swiotlb_force = SWIOTLB_FORCE;
-#endif
/* Isolation VMs are unenlightened SEV-based VMs, thus this check: */
if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
if (hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index df96926421be0..04140e20ef1a3 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -53,6 +53,14 @@ static void __init pci_swiotlb_detect(void)
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
x86_swiotlb_enable = true;
 
+   /*
+* Guest with guest memory encryption currently perform all DMA through
+* bounce buffers as the hypervisor can't access arbitrary VM memory
+* that is not explicitly shared with it.
+*/
+   if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+   swiotlb_force = SWIOTLB_FORCE;
+
if (swiotlb_force == SWIOTLB_FORCE)
x86_swiotlb_enable = true;
 }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 6169053c28541..d732d727d3dee 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -432,9 +432,6 @@ void __init sme_early_init(void)
for (i = 0; i < ARRAY_SIZE(protection_map); i++)
protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-   if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-   swiotlb_force = SWIOTLB_FORCE;
-
x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
-- 
2.30.2



[PATCH 07/15] x86: remove the IOMMU table infrastructure

2022-04-03 Thread Christoph Hellwig
The IOMMU table tries to separate the different IOMMUs into different
backends, but actually requires various cross calls.

Rewrite the code to do the generic swiotlb/swiotlb-xen setup directly
in pci-dma.c and then just call into the IOMMU drivers.
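
The initialization then becomes a plain sequence of calls; roughly (a sketch
of the shape at the end of this series as I read it, not the verbatim result
of this patch alone):

	void __init pci_iommu_alloc(void)
	{
		if (xen_pv_domain()) {
			pci_xen_swiotlb_init();
			return;
		}
		pci_swiotlb_detect();		/* may set x86_swiotlb_enable */
		gart_iommu_hole_init();		/* direct calls, no more tables */
		amd_iommu_detect();
		detect_intel_iommu();
		swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
	}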

Signed-off-by: Christoph Hellwig 
---
 arch/ia64/include/asm/iommu_table.h|   7 --
 arch/x86/include/asm/dma-mapping.h |   1 -
 arch/x86/include/asm/gart.h|   5 +-
 arch/x86/include/asm/iommu.h   |   6 ++
 arch/x86/include/asm/iommu_table.h | 102 ---
 arch/x86/include/asm/swiotlb.h |  30 ---
 arch/x86/include/asm/xen/swiotlb-xen.h |   2 -
 arch/x86/kernel/Makefile   |   2 -
 arch/x86/kernel/amd_gart_64.c  |   5 +-
 arch/x86/kernel/aperture_64.c  |  14 ++--
 arch/x86/kernel/pci-dma.c  | 107 -
 arch/x86/kernel/pci-iommu_table.c  |  77 --
 arch/x86/kernel/pci-swiotlb.c  |  77 --
 arch/x86/kernel/tboot.c|   1 -
 arch/x86/kernel/vmlinux.lds.S  |  12 ---
 arch/x86/xen/Makefile  |   2 -
 arch/x86/xen/pci-swiotlb-xen.c |  96 --
 drivers/iommu/amd/init.c   |   6 --
 drivers/iommu/amd/iommu.c  |   5 +-
 drivers/iommu/intel/dmar.c |   6 +-
 include/linux/dmar.h   |   6 +-
 21 files changed, 110 insertions(+), 459 deletions(-)
 delete mode 100644 arch/ia64/include/asm/iommu_table.h
 delete mode 100644 arch/x86/include/asm/iommu_table.h
 delete mode 100644 arch/x86/include/asm/swiotlb.h
 delete mode 100644 arch/x86/kernel/pci-iommu_table.c
 delete mode 100644 arch/x86/kernel/pci-swiotlb.c
 delete mode 100644 arch/x86/xen/pci-swiotlb-xen.c

diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h
deleted file mode 100644
index cc96116ac276a..0000000000000
--- a/arch/ia64/include/asm/iommu_table.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_IOMMU_TABLE_H
-#define _ASM_IA64_IOMMU_TABLE_H
-
-#define IOMMU_INIT_POST(_detect)
-
-#endif /* _ASM_IA64_IOMMU_TABLE_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index bb1654fe0ce74..256fd8115223d 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -9,7 +9,6 @@
 
 #include <linux/scatterlist.h>
 #include <asm/io.h>
-#include <asm/swiotlb.h>
 
 extern int iommu_merge;
 extern int panic_on_overflow;
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 3185565743459..5af8088a10df6 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -38,7 +38,7 @@ extern int gart_iommu_aperture_disabled;
 extern void early_gart_iommu_check(void);
 extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
-extern int gart_iommu_hole_init(void);
+void gart_iommu_hole_init(void);
 
 #else
 #define gart_iommu_aperture	0
@@ -51,9 +51,8 @@ static inline void early_gart_iommu_check(void)
 static inline void gart_parse_options(char *options)
 {
 }
-static inline int gart_iommu_hole_init(void)
+static inline void gart_iommu_hole_init(void)
 {
-   return -ENODEV;
 }
 #endif
 
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index bf1ed2ddc74bd..dba89ed40d38d 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -9,6 +9,12 @@
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
+#ifdef CONFIG_SWIOTLB
+extern bool x86_swiotlb_enable;
+#else
+#define x86_swiotlb_enable false
+#endif
+
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
 
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
deleted file mode 100644
index 1fb3fd1a83c25..0000000000000
--- a/arch/x86/include/asm/iommu_table.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_IOMMU_TABLE_H
-#define _ASM_X86_IOMMU_TABLE_H
-
-#include <asm/swiotlb.h>
-
-/*
- * History lesson:
- * The execution chain of IOMMUs in 2.6.36 looks as so:
- *
- *            [xen-swiotlb]
- *                 |
- *         +----[swiotlb *]--+
- *        /          |        \
- *       /           |         \
- *    [GART]     [Calgary]  [Intel VT-d]
- *     /
- *    /
- * [AMD-Vi]
- *
- * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
- * over the rest of IOMMUs and unconditionally initialize the SWIOTLB.
- * Also it would surreptitiously initialize set the swiotlb=1 if there were
- * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb
- * flag would be turned off by all IOMMUs except the Calgary one.
- *
- * The IOMMU_INIT* macros allow a similar tree (or more complex if desired)
- * to be built by defining who we depend on.
- *
- * And all that needs to be done is to use one of the macros in the IOMMU
- * and the pci-dma.c will take care of the rest.
- */
-
-struct 

[PATCH 06/15] MIPS/octeon: use swiotlb_init instead of open coding it

2022-04-03 Thread Christoph Hellwig
Use the generic swiotlb initialization helper instead of open coding it.

Signed-off-by: Christoph Hellwig 
Acked-by: Thomas Bogendoerfer 
---
 arch/mips/cavium-octeon/dma-octeon.c | 15 ++-
 arch/mips/pci/pci-octeon.c   |  2 +-
 2 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index df70308db0e69..fb7547e217263 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -186,15 +186,12 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
 }
 
-char *octeon_swiotlb;
-
 void __init plat_swiotlb_setup(void)
 {
phys_addr_t start, end;
phys_addr_t max_addr;
phys_addr_t addr_size;
size_t swiotlbsize;
-   unsigned long swiotlb_nslabs;
u64 i;
 
max_addr = 0;
@@ -236,15 +233,7 @@ void __init plat_swiotlb_setup(void)
if (OCTEON_IS_OCTEON2() && max_addr >= 0x1ul)
swiotlbsize = 64 * (1<<20);
 #endif
-   swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
-   swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
-   swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
-
-   octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
-   if (!octeon_swiotlb)
-   panic("%s: Failed to allocate %zu bytes align=%lx\n",
- __func__, swiotlbsize, PAGE_SIZE);
 
-   if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
-   panic("Cannot allocate SWIOTLB buffer");
+   swiotlb_adjust_size(swiotlbsize);
+   swiotlb_init(1);
 }
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index fc29b85cfa926..e457a18cbdc59 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -664,7 +664,7 @@ static int __init octeon_pci_setup(void)
 
/* BAR1 movable regions contiguous to cover the swiotlb */
octeon_bar1_pci_phys =
-   virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+   io_tlb_default_mem.start & ~((1ull << 22) - 1);
 
for (index = 0; index < 32; index++) {
union cvmx_pci_bar1_indexx bar1_index;
-- 
2.30.2



[PATCH 05/15] arm/xen: don't check for xen_initial_domain() in xen_create_contiguous_region

2022-04-03 Thread Christoph Hellwig
From: Stefano Stabellini 

It used to be that Linux enabled swiotlb-xen when running a dom0 on ARM.
Since f5079a9a2a31 "xen/arm: introduce XENFEAT_direct_mapped and
XENFEAT_not_direct_mapped", Linux detects whether to enable or disable
swiotlb-xen based on the new feature flags: XENFEAT_direct_mapped and
XENFEAT_not_direct_mapped.

However, there is still a leftover xen_initial_domain() check in
xen_create_contiguous_region. Remove the check as
xen_create_contiguous_region is only called by swiotlb-xen during
initialization. If xen_create_contiguous_region is called, we know Linux
is running 1:1 mapped so there is no need for additional checks.

Also update the in-code comment.

Signed-off-by: Stefano Stabellini 
Signed-off-by: Christoph Hellwig 
---
 arch/arm/xen/mm.c | 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index a7e54a087b802..28c2070602535 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -122,10 +122,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 unsigned int address_bits,
 dma_addr_t *dma_handle)
 {
-   if (!xen_initial_domain())
-   return -EINVAL;
-
-   /* we assume that dom0 is mapped 1:1 for now */
+   /* the domain is 1:1 mapped to use swiotlb-xen */
*dma_handle = pstart;
return 0;
 }
-- 
2.30.2



[PATCH 04/15] swiotlb: rename swiotlb_late_init_with_default_size

2022-04-03 Thread Christoph Hellwig
swiotlb_late_init_with_default_size is an overly verbose name that
doesn't even catch what the function is doing, given that the size is
not just a default but the actual requested size.

Rename it to swiotlb_init_late.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Anshuman Khandual 
---
 arch/x86/pci/sta2x11-fixup.c | 2 +-
 include/linux/swiotlb.h  | 2 +-
 kernel/dma/swiotlb.c | 6 ++
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 101081ad64b6d..e0c039a75b2db 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
int size = STA2X11_SWIOTLB_SIZE;
/* First instance: register your own swiotlb area */
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-   if (swiotlb_late_init_with_default_size(size))
+   if (swiotlb_init_late(size))
dev_emerg(&pdev->dev, "init swiotlb failed\n");
}
list_add(&instance->list, &sta2x11_instance_list);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 9fb3a568f0c51..b48b26bfa0edb 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -40,7 +40,7 @@ extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-extern int swiotlb_late_init_with_default_size(size_t default_size);
+int swiotlb_init_late(size_t size);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index e0127e397335e..9a4fe6e48a074 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -281,11 +281,9 @@ swiotlb_init(int verbose)
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
+int swiotlb_init_late(size_t size)
 {
-   unsigned long nslabs =
-   ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+   unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned long bytes;
unsigned char *vstart = NULL;
unsigned int order;
-- 
2.30.2



[PATCH 02/15] swiotlb: make swiotlb_exit a no-op if SWIOTLB_FORCE is set

2022-04-03 Thread Christoph Hellwig
If force bouncing is enabled we can't release the buffers.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Anshuman Khandual 
---
 kernel/dma/swiotlb.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 73a41cec9e386..98bb0eb44a7bf 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -369,6 +369,9 @@ void __init swiotlb_exit(void)
unsigned long tbl_vaddr;
size_t tbl_size, slots_size;
 
+   if (swiotlb_force == SWIOTLB_FORCE)
+   return;
+
if (!mem->nslabs)
return;
 
-- 
2.30.2



[PATCH 01/15] dma-direct: use is_swiotlb_active in dma_direct_map_page

2022-04-03 Thread Christoph Hellwig
Use the more specific is_swiotlb_active check instead of checking the
global swiotlb_force variable.
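
For context, swiotlb_force is a single global while is_swiotlb_active()
answers per device, which matters once a device can be wired to its own
io_tlb_mem pool. The difference in spirit, mirroring the hunk below:

	/* old: one global answer for every device */
	if (swiotlb_force != SWIOTLB_NO_FORCE)
		return swiotlb_map(dev, phys, size, dir, attrs);

	/* new: honours what this particular device actually uses */
	if (is_swiotlb_active(dev))
		return swiotlb_map(dev, phys, size, dir, attrs);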

Signed-off-by: Christoph Hellwig 
Reviewed-by: Anshuman Khandual 
---
 kernel/dma/direct.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 4632b0f4f72eb..4dc16e08c7e1a 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -91,7 +91,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return swiotlb_map(dev, phys, size, dir, attrs);
 
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
-   if (swiotlb_force != SWIOTLB_NO_FORCE)
+   if (is_swiotlb_active(dev))
return swiotlb_map(dev, phys, size, dir, attrs);
 
dev_WARN_ONCE(dev, 1,
-- 
2.30.2



cleanup swiotlb initialization v8

2022-04-03 Thread Christoph Hellwig
Hi all,

this series tries to clean up the swiotlb initialization, including
that of swiotlb-xen.  To get there it also removes the x86 iommu table
infrastructure that massively obfuscates the initialization path.

Git tree:

git://git.infradead.org/users/hch/misc.git swiotlb-init-cleanup

Gitweb:


http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/swiotlb-init-cleanup

Changes since v7:
 - rebased to Linux 5.18-rc1
 - better document the lower bound swiotlb size for xen-swiotlb
 - improve the nslabs calculation for the retry case in
   swiotlb_init_remap and swiotlb_init_late

Changes since v6:
 - use IO_TLB_MIN_SLABS instead of open coding the constant
 - call the remap callback later in swiotlb_init_late
 - set SWIOTLB_ANY for xen/x86

Changes since v5:
 - split a patch into three
 - fix setting x86_swiotlb_enable for Xen
 - fix a comment about forced bounce buffering for guest memory
   encryption
 - remove the xen_initial_domain check from
   xen_create_contiguous_region

Changes since v3:
 - fix a compilation issue on some powerpc configfs
 - fix and cleanup how forced bounce buffering is enabled for
   guest memory encryption

Changes since v2:
 - make ppc_swiotlb_flags actually work again
 - also force enable swiotlb for guest encrypted memory to cater
   to hyperv which doesn't set the host encrypted memory flag

Changes since v1:
 - skip IOMMU initialization on Xen PV kernels
 - various small whitespace / typo fixes

Diffstat:
 arch/ia64/include/asm/iommu_table.h  |7 -
 arch/x86/include/asm/iommu_table.h   |  102 ---
 arch/x86/include/asm/swiotlb.h   |   30 -
 arch/x86/kernel/pci-iommu_table.c|   77 --
 arch/x86/kernel/pci-swiotlb.c|   77 --
 arch/x86/xen/pci-swiotlb-xen.c   |   96 --
 b/arch/arm/mm/init.c |6 -
 b/arch/arm/xen/mm.c  |   26 ++--
 b/arch/arm64/mm/init.c   |6 -
 b/arch/ia64/mm/init.c|4 
 b/arch/mips/cavium-octeon/dma-octeon.c   |   15 --
 b/arch/mips/loongson64/dma.c |2 
 b/arch/mips/pci/pci-octeon.c |2 
 b/arch/mips/sibyte/common/dma.c  |2 
 b/arch/powerpc/include/asm/svm.h |4 
 b/arch/powerpc/include/asm/swiotlb.h |1 
 b/arch/powerpc/kernel/dma-swiotlb.c  |1 
 b/arch/powerpc/mm/mem.c  |6 -
 b/arch/powerpc/platforms/pseries/setup.c |3 
 b/arch/powerpc/platforms/pseries/svm.c   |   26 
 b/arch/riscv/mm/init.c   |8 -
 b/arch/s390/mm/init.c|3 
 b/arch/x86/include/asm/dma-mapping.h |   12 --
 b/arch/x86/include/asm/gart.h|5 
 b/arch/x86/include/asm/iommu.h   |8 +
 b/arch/x86/include/asm/xen/page.h|5 
 b/arch/x86/include/asm/xen/swiotlb-xen.h |2 
 b/arch/x86/kernel/Makefile   |2 
 b/arch/x86/kernel/amd_gart_64.c  |5 
 b/arch/x86/kernel/aperture_64.c  |   14 --
 b/arch/x86/kernel/cpu/mshyperv.c |8 -
 b/arch/x86/kernel/pci-dma.c  |  114 +
 b/arch/x86/kernel/tboot.c|1 
 b/arch/x86/kernel/vmlinux.lds.S  |   12 --
 b/arch/x86/mm/mem_encrypt_amd.c  |3 
 b/arch/x86/pci/sta2x11-fixup.c   |2 
 b/arch/x86/xen/Makefile  |2 
 b/drivers/iommu/amd/init.c   |6 -
 b/drivers/iommu/amd/iommu.c  |5 
 b/drivers/iommu/intel/dmar.c |6 -
 b/drivers/xen/swiotlb-xen.c  |  132 -
 b/include/linux/dmar.h   |6 -
 b/include/linux/swiotlb.h|   22 ++--
 b/include/trace/events/swiotlb.h |   29 +
 b/include/xen/arm/page.h |1 
 b/include/xen/swiotlb-xen.h  |8 +
 b/kernel/dma/direct.h|2 
 b/kernel/dma/swiotlb.c   |  163 ++-
 48 files changed, 252 insertions(+), 827 deletions(-)


[PATCH 03/15] swiotlb: simplify swiotlb_max_segment

2022-04-03 Thread Christoph Hellwig
Remove the bogus Xen override that was usually larger than the actual
size and just calculate the value on demand.  Note that
swiotlb_max_segment still doesn't make sense as an interface and should
eventually be removed.
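
As a worked example, assuming the default pool and IO_TLB_SHIFT = 11:
nslabs = (64 << 20) >> 11 = 32768 slabs, and 32768 << 11 is 64MB again,
which rounddown(..., PAGE_SIZE) leaves unchanged since it is already
page-aligned, so the function now reports the full pool size once swiotlb
is initialized and 0 before.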

Signed-off-by: Christoph Hellwig 
Reviewed-by: Anshuman Khandual 
---
 drivers/xen/swiotlb-xen.c |  2 --
 include/linux/swiotlb.h   |  1 -
 kernel/dma/swiotlb.c  | 20 +++-
 3 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 47aebd98f52f5..485cd06ed39e7 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -202,7 +202,6 @@ int xen_swiotlb_init(void)
rc = swiotlb_late_init_with_tbl(start, nslabs);
if (rc)
return rc;
-   swiotlb_set_max_segment(PAGE_SIZE);
return 0;
 error:
if (nslabs > 1024 && repeat--) {
@@ -254,7 +253,6 @@ void __init xen_swiotlb_init_early(void)
 
if (swiotlb_init_with_tbl(start, nslabs, true))
panic("Cannot allocate SWIOTLB buffer");
-   swiotlb_set_max_segment(PAGE_SIZE);
 }
 #endif /* CONFIG_X86 */
 
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f6c3638255d54..9fb3a568f0c51 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -164,7 +164,6 @@ static inline void swiotlb_adjust_size(unsigned long size)
 #endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
 struct page *swiotlb_alloc(struct device *dev, size_t size);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 98bb0eb44a7bf..e0127e397335e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -68,12 +68,6 @@ struct io_tlb_mem io_tlb_default_mem;
 
 phys_addr_t swiotlb_unencrypted_base;
 
-/*
- * Max segment that we can provide which (if pages are contingous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-static unsigned int max_segment;
-
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 
 static int __init
@@ -97,18 +91,12 @@ early_param("swiotlb", setup_io_tlb_npages);
 
 unsigned int swiotlb_max_segment(void)
 {
-   return io_tlb_default_mem.nslabs ? max_segment : 0;
+   if (!io_tlb_default_mem.nslabs)
+   return 0;
+   return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
 }
 EXPORT_SYMBOL_GPL(swiotlb_max_segment);
 
-void swiotlb_set_max_segment(unsigned int val)
-{
-   if (swiotlb_force == SWIOTLB_FORCE)
-   max_segment = 1;
-   else
-   max_segment = rounddown(val, PAGE_SIZE);
-}
-
 unsigned long swiotlb_size_or_default(void)
 {
return default_nslabs << IO_TLB_SHIFT;
@@ -258,7 +246,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
if (verbose)
swiotlb_print_info();
-   swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
return 0;
 }
 
@@ -359,7 +346,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
 
swiotlb_print_info();
-   swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
return 0;
 }
 
-- 
2.30.2



Re: [PATCH] powerpc/85xx: Remove fsl,85... bindings

2022-04-03 Thread Michael Ellerman
Borislav Petkov  writes:
> On Thu, Mar 31, 2022 at 12:13:10PM +0200, Christophe Leroy wrote:
>> Since commit 8a4ab218ef70 ("powerpc/85xx: Change deprecated binding
>> for 85xx-based boards"), those bindings are not used anymore.
>> 
>> A comment in drivers/edac/mpc85xx_edac.c say they are to be removed
>> with kernel 2.6.30.
>> 
>> Remove them now.
>> 
>> Signed-off-by: Christophe Leroy 
>> ---
>>  .../bindings/memory-controllers/fsl/fsl,ddr.yaml   |  6 --
>>  .../devicetree/bindings/powerpc/fsl/l2cache.txt|  6 --
>>  drivers/edac/mpc85xx_edac.c| 14 --
>>  3 files changed, 26 deletions(-)
>
>
> I'll take it through the EDAC tree if there are no objections.

Fine by me, thanks.

cheers


[PATCH] [Rebased for 5.4] powerpc/kasan: Fix early region not updated correctly

2022-04-03 Thread Christophe Leroy
From: Chen Jingwen 

This is backport for 5.4

Upstream commit dd75080aa8409ce10d50fb58981c6b59bf8707d3

The shadow's page table is not updated when PTE_RPN_SHIFT is 24
and PAGE_SHIFT is 12. This causes not only false positives but
also false negatives, as shown in the following text.

Fix it by bringing the logic of kasan_early_shadow_page_entry here.

1. False Positive:
==
BUG: KASAN: vmalloc-out-of-bounds in pcpu_alloc+0x508/0xa50
Write of size 16 at addr f57f3be0 by task swapper/0/1

CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.15.0-12267-gdebe436e77c7 #1
Call Trace:
[c80d1c20] [c07fe7b8] dump_stack_lvl+0x4c/0x6c (unreliable)
[c80d1c40] [c02ff668] print_address_description.constprop.0+0x88/0x300
[c80d1c70] [c02ff45c] kasan_report+0x1ec/0x200
[c80d1cb0] [c0300b20] kasan_check_range+0x160/0x2f0
[c80d1cc0] [c03018a4] memset+0x34/0x90
[c80d1ce0] [c0280108] pcpu_alloc+0x508/0xa50
[c80d1d40] [c02fd7bc] __kmem_cache_create+0xfc/0x570
[c80d1d70] [c0283d64] kmem_cache_create_usercopy+0x274/0x3e0
[c80d1db0] [c2036580] init_sd+0xc4/0x1d0
[c80d1de0] [c00044a0] do_one_initcall+0xc0/0x33c
[c80d1eb0] [c2001624] kernel_init_freeable+0x2c8/0x384
[c80d1ef0] [c0004b14] kernel_init+0x24/0x170
[c80d1f10] [c001b26c] ret_from_kernel_thread+0x5c/0x64

Memory state around the buggy address:
 f57f3a80: f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8
 f57f3b00: f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8
>f57f3b80: f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8
   ^
 f57f3c00: f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8
 f57f3c80: f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8 f8
==

2. False Negative (with KASAN tests):
==
Before fix:
ok 45 - kmalloc_double_kzfree
# vmalloc_oob: EXPECTATION FAILED at lib/test_kasan.c:1039
KASAN failure expected in "((volatile char *)area)[3100]", but none occurred
not ok 46 - vmalloc_oob
not ok 1 - kasan

==
After fix:
ok 1 - kasan

Fixes: cbd18991e24fe ("powerpc/mm: Fix an Oops in kasan_mmu_init()")
Cc: sta...@vger.kernel.org # 5.4.x
Signed-off-by: Chen Jingwen 
Reviewed-by: Christophe Leroy 
Signed-off-by: Michael Ellerman 
Link: https://lore.kernel.org/r/20211229035226.59159-1-chenjingw...@huawei.com
[chleroy: Backport for 5.4]
Signed-off-by: Christophe Leroy 
---
 arch/powerpc/mm/kasan/kasan_init_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 1cfe57b51d7e..3f78007a7282 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -121,7 +121,7 @@ static void __init kasan_remap_early_shadow_ro(void)
pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
 
-   if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+   if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page)))
continue;
 
__set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
-- 
2.35.1



Re: [PATCH] [Rebased for 5.4] powerpc/kasan: Fix early region not updated correctly

2022-04-03 Thread Greg KH
On Sun, Apr 03, 2022 at 11:54:55AM +, Christophe Leroy wrote:
> 
> 
> > On 03/04/2022 at 12:25, Greg KH wrote:
> > On Sat, Apr 02, 2022 at 06:13:31PM +0200, Christophe Leroy wrote:
> >> From: Chen Jingwen 
> >>
> >> This is backport for 5.4
> >>
> >> Upstream commit 5647a94a26e352beed61788b46e035d9d12664cd
> > 
> > This is not a commit in Linus's tree :(
> > 
> 
> Oops. Don't know what I did, that's indeed the commit after I 
> cherry-picked it on top of 5.4.188 and before I modified it.
> 
> According to the mail you sent me yesterday to tell it FAILED to apply 
> on 5.4, the upstream commit is dd75080aa8409ce10d50fb58981c6b59bf8707d3

Can you resend with this fixed up please?

thanks,

greg k-h


[PATCH] soc: fsl: qe: Fix refcount leak in par_io_of_config

2022-04-03 Thread Miaoqian Lin
The device_node pointer is returned by of_parse_phandle() with its
refcount incremented, so we should call of_node_put() on it when done.
This function only calls of_node_put() in the regular path, which
causes a refcount leak in the error paths.
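
The rule being applied is the usual of_parse_phandle() pattern; a generic
sketch with invented identifiers ("some-prop", do_something), not the driver
code itself:

	struct device_node *child = of_parse_phandle(np, "some-prop", 0);
	if (!child)
		return -EINVAL;
	if (do_something(child) < 0) {
		of_node_put(child);	/* error path must drop the ref too */
		return -EINVAL;
	}
	of_node_put(child);		/* regular path drops it when done */
	return 0;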

Fixes: 986585385131 ("[POWERPC] Add QUICC Engine (QE) infrastructure")
Signed-off-by: Miaoqian Lin 
---
 drivers/soc/fsl/qe/qe_io.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index a5e2d0e5ab51..9f5f746bea88 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -159,12 +159,12 @@ int par_io_of_config(struct device_node *np)
pio_map = of_get_property(pio, "pio-map", &pio_map_len);
if (pio_map == NULL) {
printk(KERN_ERR "pio-map is not set!\n");
-   return -1;
+   goto err_node_put;
}
pio_map_len /= sizeof(unsigned int);
if ((pio_map_len % 6) != 0) {
printk(KERN_ERR "pio-map format wrong!\n");
-   return -1;
+   goto err_node_put;
}
 
while (pio_map_len > 0) {
@@ -182,5 +182,9 @@ int par_io_of_config(struct device_node *np)
}
of_node_put(pio);
return 0;
+
+err_node_put:
+   of_node_put(pio);
+   return -1;
 }
 EXPORT_SYMBOL(par_io_of_config);
-- 
2.17.1



Re: [PATCH] [Rebased for 5.4] powerpc/kasan: Fix early region not updated correctly

2022-04-03 Thread Christophe Leroy


> On 03/04/2022 at 12:25, Greg KH wrote:
> On Sat, Apr 02, 2022 at 06:13:31PM +0200, Christophe Leroy wrote:
>> From: Chen Jingwen 
>>
>> This is backport for 5.4
>>
>> Upstream commit 5647a94a26e352beed61788b46e035d9d12664cd
> 
> This is not a commit in Linus's tree :(
> 

Oops. Don't know what I did, that's indeed the commit after I 
cherry-picked it on top of 5.4.188 and before I modified it.

According to the mail you sent me yesterday to tell it FAILED to apply 
on 5.4, the upstream commit is dd75080aa8409ce10d50fb58981c6b59bf8707d3

Thanks
Christophe

Re: [PATCH] [Rebased for 5.4] powerpc/kasan: Fix early region not updated correctly

2022-04-03 Thread Greg KH
On Sat, Apr 02, 2022 at 06:13:31PM +0200, Christophe Leroy wrote:
> From: Chen Jingwen 
> 
> This is backport for 5.4
> 
> Upstream commit 5647a94a26e352beed61788b46e035d9d12664cd

This is not a commit in Linus's tree :(



Re: [PATCH] char: tpm: Prepare cleanup of powerpc's asm/prom.h

2022-04-03 Thread Jarkko Sakkinen
On Sat, Apr 02, 2022 at 12:29:19PM +0200, Christophe Leroy wrote:
> powerpc's asm/prom.h brings some headers that it doesn't
> need itself.
> 
> In order to clean it up, first add missing headers in
> users of asm/prom.h
> 
> Signed-off-by: Christophe Leroy 

I don't understand this. It changes nothing as far as kernel is concerned.

> ---
>  drivers/char/tpm/tpm_atmel.h   | 2 --
>  drivers/char/tpm/tpm_ibmvtpm.c | 1 -
>  2 files changed, 3 deletions(-)
> 
> diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
> index ba37e77e8af3..959f7cce8301 100644
> --- a/drivers/char/tpm/tpm_atmel.h
> +++ b/drivers/char/tpm/tpm_atmel.h
> @@ -26,8 +26,6 @@ struct tpm_atmel_priv {
>  
>  #ifdef CONFIG_PPC64
>  
> -#include <asm/prom.h>
> -
>  #define atmel_getb(priv, offset) readb(priv->iobase + offset)
>  #define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
>  #define atmel_request_region request_mem_region
> diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
> index 3af4c07a9342..1180cce7067a 100644
> --- a/drivers/char/tpm/tpm_ibmvtpm.c
> +++ b/drivers/char/tpm/tpm_ibmvtpm.c
> @@ -20,7 +20,6 @@
>  #include 
>  #include 
>  #include 
> -#include <asm/prom.h>
>  
>  #include "tpm.h"
>  #include "tpm_ibmvtpm.h"
> -- 
> 2.35.1
> 

BR, Jarkko