Re: [PATCH 3/3] ARM: Use asm_inline for ALTERNATIVE()
On Thu, 15 May 2025, Andrew Cooper wrote: > ... when there really are only a few instructions in line. > > In some cases, reformat to reduce left-hand margin space. > > No functional change. > > Signed-off-by: Andrew Cooper Reviewed-by: Stefano Stabellini > --- > CC: Jan Beulich > CC: Roger Pau Monné > CC: Stefano Stabellini > CC: Julien Grall > CC: Volodymyr Babchuk > CC: Bertrand Marquis > CC: Michal Orzel > > v2: > * New, split out of previous single patch > --- > xen/arch/arm/include/asm/alternative.h| 4 +-- > xen/arch/arm/include/asm/arm64/flushtlb.h | 4 +-- > xen/arch/arm/include/asm/arm64/io.h | 43 ++- > xen/arch/arm/include/asm/cpuerrata.h | 8 ++--- > xen/arch/arm/include/asm/cpufeature.h | 8 ++--- > xen/arch/arm/include/asm/page.h | 12 --- > xen/arch/arm/include/asm/processor.h | 7 ++-- > xen/arch/arm/include/asm/sysregs.h| 10 +++--- > xen/arch/arm/mmu/p2m.c| 3 +- > 9 files changed, 58 insertions(+), 41 deletions(-) > > diff --git a/xen/arch/arm/include/asm/alternative.h > b/xen/arch/arm/include/asm/alternative.h > index 22477d9497a3..1563f03a0f5a 100644 > --- a/xen/arch/arm/include/asm/alternative.h > +++ b/xen/arch/arm/include/asm/alternative.h > @@ -209,9 +209,9 @@ alternative_endif > #endif /* __ASSEMBLY__ */ > > /* > - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); > + * Usage: asm_inline (ALTERNATIVE(oldinstr, newinstr, feature)); > * > - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); > + * Usage: asm_inline (ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); > * N.B. If CONFIG_FOO is specified, but not selected, the whole block > * will be omitted, including oldinstr. 
> */ > diff --git a/xen/arch/arm/include/asm/arm64/flushtlb.h > b/xen/arch/arm/include/asm/arm64/flushtlb.h > index 45642201d147..3b99c11b50d1 100644 > --- a/xen/arch/arm/include/asm/arm64/flushtlb.h > +++ b/xen/arch/arm/include/asm/arm64/flushtlb.h > @@ -31,7 +31,7 @@ > #define TLB_HELPER(name, tlbop, sh) \ > static inline void name(void)\ > {\ > -asm volatile(\ > +asm_inline volatile (\ > "dsb " # sh "st;" \ > "tlbi " # tlbop ";"\ > ALTERNATIVE( \ > @@ -55,7 +55,7 @@ static inline void name(void)\ > #define TLB_HELPER_VA(name, tlbop) \ > static inline void name(vaddr_t va) \ > {\ > -asm volatile(\ > +asm_inline volatile (\ > "tlbi " # tlbop ", %0;"\ > ALTERNATIVE( \ > "nop; nop;", \ > diff --git a/xen/arch/arm/include/asm/arm64/io.h > b/xen/arch/arm/include/asm/arm64/io.h > index 7d5959877759..ac90b729c44d 100644 > --- a/xen/arch/arm/include/asm/arm64/io.h > +++ b/xen/arch/arm/include/asm/arm64/io.h > @@ -51,40 +51,51 @@ static inline void __raw_writeq(u64 val, volatile void > __iomem *addr) > static inline u8 __raw_readb(const volatile void __iomem *addr) > { > u8 val; > -asm volatile(ALTERNATIVE("ldrb %w0, [%1]", > - "ldarb %w0, [%1]", > - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) > - : "=r" (val) : "r" (addr)); > + > +asm_inline volatile ( > +ALTERNATIVE("ldrb %w0, [%1]", > +"ldarb %w0, [%1]", > +ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) > +: "=r" (val) : "r" (addr) ); > + > return val; > } > > static inline u16 __raw_readw(const volatile void __iomem *addr) > { > u16 val; > -asm volatile(ALTERNATIVE("ldrh %w0, [%1]", > - "ldarh %w0, [%1]", > - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) > - : "=r" (val) : "r" (addr)); > +asm_inline volatile ( > +ALTERNATIVE("ldrh %w0, [%1]", > +"ldarh %w0, [%1]", > +ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) > +: "=r" (val) : "r" (addr) ); > + > return val; > } > > static inline u32 __raw_readl(const volatile void __iomem *addr) > { > u32 val; > -asm volatile(ALTERNATIVE("ldr %w0, [%1]", > - "ldar %w0, [%1]", > - 
ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) > - : "=r" (val) : "r" (addr)); > + > +asm_inline volatile ( > +ALTERNATIVE("ldr %w0, [%1]", > +"ldar %w0, [%1]", > +ARM64_WORKAROU
[PATCH 3/3] ARM: Use asm_inline for ALTERNATIVE()
... when there really are only a few instructions in line. In some cases, reformat to reduce left-hand margin space. No functional change. Signed-off-by: Andrew Cooper --- CC: Jan Beulich CC: Roger Pau Monné CC: Stefano Stabellini CC: Julien Grall CC: Volodymyr Babchuk CC: Bertrand Marquis CC: Michal Orzel v2: * New, split out of previous single patch --- xen/arch/arm/include/asm/alternative.h| 4 +-- xen/arch/arm/include/asm/arm64/flushtlb.h | 4 +-- xen/arch/arm/include/asm/arm64/io.h | 43 ++- xen/arch/arm/include/asm/cpuerrata.h | 8 ++--- xen/arch/arm/include/asm/cpufeature.h | 8 ++--- xen/arch/arm/include/asm/page.h | 12 --- xen/arch/arm/include/asm/processor.h | 7 ++-- xen/arch/arm/include/asm/sysregs.h| 10 +++--- xen/arch/arm/mmu/p2m.c| 3 +- 9 files changed, 58 insertions(+), 41 deletions(-) diff --git a/xen/arch/arm/include/asm/alternative.h b/xen/arch/arm/include/asm/alternative.h index 22477d9497a3..1563f03a0f5a 100644 --- a/xen/arch/arm/include/asm/alternative.h +++ b/xen/arch/arm/include/asm/alternative.h @@ -209,9 +209,9 @@ alternative_endif #endif /* __ASSEMBLY__ */ /* - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); + * Usage: asm_inline (ALTERNATIVE(oldinstr, newinstr, feature)); * - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); + * Usage: asm_inline (ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); * N.B. If CONFIG_FOO is specified, but not selected, the whole block * will be omitted, including oldinstr. 
*/ diff --git a/xen/arch/arm/include/asm/arm64/flushtlb.h b/xen/arch/arm/include/asm/arm64/flushtlb.h index 45642201d147..3b99c11b50d1 100644 --- a/xen/arch/arm/include/asm/arm64/flushtlb.h +++ b/xen/arch/arm/include/asm/arm64/flushtlb.h @@ -31,7 +31,7 @@ #define TLB_HELPER(name, tlbop, sh) \ static inline void name(void)\ {\ -asm volatile(\ +asm_inline volatile (\ "dsb " # sh "st;" \ "tlbi " # tlbop ";"\ ALTERNATIVE( \ @@ -55,7 +55,7 @@ static inline void name(void)\ #define TLB_HELPER_VA(name, tlbop) \ static inline void name(vaddr_t va) \ {\ -asm volatile(\ +asm_inline volatile (\ "tlbi " # tlbop ", %0;"\ ALTERNATIVE( \ "nop; nop;", \ diff --git a/xen/arch/arm/include/asm/arm64/io.h b/xen/arch/arm/include/asm/arm64/io.h index 7d5959877759..ac90b729c44d 100644 --- a/xen/arch/arm/include/asm/arm64/io.h +++ b/xen/arch/arm/include/asm/arm64/io.h @@ -51,40 +51,51 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr) static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; -asm volatile(ALTERNATIVE("ldrb %w0, [%1]", - "ldarb %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); + +asm_inline volatile ( +ALTERNATIVE("ldrb %w0, [%1]", +"ldarb %w0, [%1]", +ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) +: "=r" (val) : "r" (addr) ); + return val; } static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; -asm volatile(ALTERNATIVE("ldrh %w0, [%1]", - "ldarh %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); +asm_inline volatile ( +ALTERNATIVE("ldrh %w0, [%1]", +"ldarh %w0, [%1]", +ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) +: "=r" (val) : "r" (addr) ); + return val; } static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; -asm volatile(ALTERNATIVE("ldr %w0, [%1]", - "ldar %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); + +asm_inline volatile ( +ALTERNATIVE("ldr %w0, [%1]", +"ldar %w0, [%1]", 
+ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) +: "=r" (val) : "r" (addr) ); + return val; } static inline u64 __raw_readq(const volatile void __iomem *addr) { u64 val; -asm volatile(ALTERNATIVE("ldr %0, [%1]", - "ldar %0, [%1]", - ARM64_