There is no benefit in splitting the 32-bit address space into two
distinct DMA zones when the 30-bit address space isn't even available on
a device. If that is the case, default to one big ZONE_DMA spanning the
whole 32-bit address space.

This will help reduce some of the issues we've seen with big crash
kernel allocations.

Signed-off-by: Nicolas Saenz Julienne <[email protected]>
---

With this patch, on an 8GB RPi4 the zone setup looks like this:

        DMA      [mem 0x0000000000000000-0x000000003fffffff]
        DMA32    [mem 0x0000000040000000-0x00000000ffffffff]
        Normal   [mem 0x0000000100000000-0x00000001ffffffff]

And on a stock 8GB virtme/qemu guest:

        DMA      [mem 0x0000000040000000-0x00000000ffffffff]
        DMA32    empty
        Normal   [mem 0x0000000100000000-0x000000023fffffff]
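
For reference, below is a minimal, self-contained userspace sketch of the new
max_zone_phys() arithmetic that produces the limits above. The dram_start/
dram_end parameters are hardcoded stand-ins for memblock_start_of_DRAM()/
memblock_end_of_DRAM(), GENMASK_ULL()/DMA_BIT_MASK() are open-coded versions
of the kernel macros, and min() is replaced by a ternary, so this is only an
illustration of the calculation, not kernel code:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define DMA_BIT_MASK(n)   (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * Same calculation as the patched max_zone_phys(), with memblock replaced
 * by explicit dram_start/dram_end arguments for illustration.
 */
static uint64_t max_zone_phys(uint64_t dram_start, uint64_t dram_end,
                              unsigned int zone_bits)
{
        uint64_t base = dram_start;
        uint64_t offset = base & GENMASK_ULL(63, 32);
        int64_t zone_size = (1ULL << zone_bits) - (base & DMA_BIT_MASK(32));

        /* The zone's address range isn't backed by any memory */
        if (zone_size <= 0)
                return 0;

        uint64_t limit = base + zone_size + offset;
        return limit < dram_end ? limit : dram_end;
}

int main(void)
{
        /* 8GB RPi4: DRAM starts at 0, so the 30-bit zone exists */
        printf("rpi4 dma30: %#llx\n",
               (unsigned long long)max_zone_phys(0x0ULL, 0x200000000ULL, 30));

        /* 8GB qemu/virtme: DRAM starts at 1GB, the 30-bit zone is empty */
        printf("qemu dma30: %#llx\n",
               (unsigned long long)max_zone_phys(0x40000000ULL, 0x240000000ULL, 30));

        /* Fallback: one ZONE_DMA covering the whole 32-bit space */
        printf("qemu dma32: %#llx\n",
               (unsigned long long)max_zone_phys(0x40000000ULL, 0x240000000ULL, 32));

        return 0;
}

This prints 0x40000000, 0 and 0x100000000, i.e. the 1GB DMA limit on the RPi4
and, on qemu, an empty 30-bit zone followed by the 32-bit fallback that ends up
as the single DMA zone shown above.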

 arch/arm64/mm/init.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b6881d61b818..857a62611d7a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -183,13 +183,20 @@ static void __init reserve_elfcorehdr(void)
 
 /*
  * Return the maximum physical address for a zone with a given address size
- * limit. It currently assumes that for memory starting above 4G, 32-bit
- * devices will use a DMA offset.
+ * limit, or zero if memory starts from an address higher than the targeted
+ * zone. It currently assumes that for memory starting above 4G, 32-bit
+ * devices will use a DMA offset.
  */
 static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
 {
-       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
-       return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
+       phys_addr_t base = memblock_start_of_DRAM();
+       phys_addr_t offset = base & GENMASK_ULL(63, 32);
+       s64 zone_size = (1ULL << zone_bits) - (base & DMA_BIT_MASK(32));
+
+       if (zone_size <= 0)
+               return 0;
+
+       return min(base + zone_size + offset, memblock_end_of_DRAM());
 }
 
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
@@ -390,6 +397,20 @@ void __init arm64_memblock_init(void)
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
                zone_dma_bits = ARM64_ZONE_DMA_BITS;
                arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
+
+               /*
+                * We don't want to split the 32-bit address space into two DMA
+                * zones when the targeted lower physical address range doesn't
+                * exist. For example, if memory starts at 0x80000000 it's
+                * pointless to create a ZONE_DMA distinct from ZONE_DMA32, as
+                * there's no way to access the 30-bit address space. If that's
+                * the case, just expand ZONE_DMA to cover the whole 32-bit
+                * address space.
+                */
+               if (!arm64_dma_phys_limit) {
+                       zone_dma_bits = 32;
+                       arm64_dma_phys_limit = max_zone_phys(32);
+               }
        }
 
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
-- 
2.28.0
