On ppc64, __va(x) checks that its input argument is less than PAGE_OFFSET. In certain code paths, we want to skip that check. Add a variant, ___va(x), to be used in such cases.
Switch the #define to static inline. __pa() still doesn't benefit from this. But a static inline done in this patch is better than multi-line #define. For __va() we get the type checking benefit. We still have to keep the macro __pa(x) to avoid a large number of compilation errors with the change. Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com> --- arch/powerpc/include/asm/page.h | 38 ++++++++++++++-------- arch/powerpc/mm/nohash/book3e_pgtable.c | 2 +- arch/powerpc/platforms/powernv/opal-core.c | 4 +-- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index a63fe6f3a0ff..8e8ffde0aef8 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -9,6 +9,7 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/mmdebug.h> #else #include <asm/types.h> #endif @@ -208,30 +209,41 @@ static inline bool pfn_valid(unsigned long pfn) * the other definitions for __va & __pa. */ #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE) -#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) +#define ___va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) #define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET) +#define __va(x) ___va(x) #else #ifdef CONFIG_PPC64 + +#ifndef __ASSEMBLY__ /* * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. * This also results in better code generation. 
*/ -#define __va(x) \ -({ \ - VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \ - (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \ -}) - -#define __pa(x) \ -({ \ - VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \ - (unsigned long)(x) & 0x0fffffffffffffffUL; \ -}) +static inline void *___va(phys_addr_t addr) +{ + return (void *)(addr | PAGE_OFFSET); +} + +static inline void *__va(phys_addr_t addr) +{ + VIRTUAL_BUG_ON((unsigned long)(addr) >= PAGE_OFFSET); + return ___va(addr); +} + +static inline phys_addr_t ___pa(void *addr) +{ + VIRTUAL_BUG_ON((unsigned long)(addr) < PAGE_OFFSET); + return (phys_addr_t)((unsigned long)addr & 0x0fffffffffffffffUL); +} +#define __pa(x) ___pa((void *)(x)) +#endif /* __ASSEMBLY__ */ #else /* 32-bit, non book E */ -#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) +#define ___va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) +#define __va(x) ___va(x) #endif #endif diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c index 4637fdd469cf..a8ce309ce740 100644 --- a/arch/powerpc/mm/nohash/book3e_pgtable.c +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c @@ -60,7 +60,7 @@ static void __init *early_alloc_pgtable(unsigned long size) if (!ptr) panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n", - __func__, size, size, __pa(MAX_DMA_ADDRESS)); + __func__, size, size, (unsigned long)__pa(MAX_DMA_ADDRESS)); return ptr; } diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index 6dba3b62269f..9a993db88212 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -192,10 +192,10 @@ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, break; if (tpos < m->offset + m->size) { - void *addr; + phys_addr_t addr; tsz = min_t(size_t, 
m->offset + m->size - tpos, count); - addr = (void *)(m->paddr + tpos - m->offset); + addr = m->paddr + tpos - m->offset; memcpy(to, __va(addr), tsz); to += tsz; tpos += tsz; -- 2.26.2