Re: [PATCH 3/3] arm64: Ask the compiler to __always_inline functions used by KVM at HYP

2020-02-21 Thread Ard Biesheuvel
On Fri, 21 Feb 2020 at 14:13, Will Deacon  wrote:
>
> On Thu, Feb 20, 2020 at 04:58:39PM +, James Morse wrote:
> > KVM uses some of the static-inline helpers like icache_is_vpipt() from
> > its HYP code. This assumes the function is inlined so that the code is
> > mapped to EL2. The compiler may decide not to inline these, and the
> > out-of-line version may not be in the __hyp_text section.
> >
> > Add the additional __always_ hint to these static-inlines that are used
> > by KVM.
> >
> > Signed-off-by: James Morse 
> > ---
> >  arch/arm64/include/asm/cache.h  | 2 +-
> >  arch/arm64/include/asm/cacheflush.h | 2 +-
> >  arch/arm64/include/asm/cpufeature.h | 8 
> >  arch/arm64/include/asm/io.h | 4 ++--
> >  4 files changed, 8 insertions(+), 8 deletions(-)
>
> Acked-by: Will Deacon 
>
> It's the right thing to do, but if this stuff keeps trickling in then
> we should make CONFIG_OPTIMIZE_INLINING depend on !ARM64 because seeing
> "__always_inline" tells you nothing about /why/ it needs to be there and
> it's hard to know if/when you can remove those annotations in future.
>

We might need to follow the same approach as we took for the EFI stub,
and create a special __kvm_hyp symbol namespace so that we can
carefully control which routines from the kernel proper it has access
to.
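
For illustration only, a rough sketch of what such a namespace could look
like, loosely modelled on the __efistub_ aliases used for the EFI stub today;
the __kvm_hyp_ prefix and the kvm_hyp_sym()/DECLARE_KVM_HYP_SYM() names below
are hypothetical, nothing like them exists in the tree yet:

/*
 * Hypothetical sketch: hyp objects would have their symbols rewritten to
 * carry a __kvm_hyp_ prefix (as objcopy does with __efistub_ for the EFI
 * stub), so any kernel-proper symbol the hyp code pulls in, such as an
 * unexpected out-of-line copy of a helper, fails to link unless it has
 * been explicitly aliased into the namespace.
 */
#define kvm_hyp_sym(sym)		__kvm_hyp_##sym
#define DECLARE_KVM_HYP_SYM(sym)	extern char kvm_hyp_sym(sym)[]

/* Kernel proper referring to a hyp symbol through the namespace: */
DECLARE_KVM_HYP_SYM(__hyp_text_start);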


Re: [PATCH 3/3] arm64: Ask the compiler to __always_inline functions used by KVM at HYP

2020-02-21 Thread Will Deacon
On Thu, Feb 20, 2020 at 04:58:39PM +, James Morse wrote:
> KVM uses some of the static-inline helpers like icache_is_vpipt() from
> its HYP code. This assumes the function is inlined so that the code is
> mapped to EL2. The compiler may decide not to inline these, and the
> out-of-line version may not be in the __hyp_text section.
> 
> Add the additional __always_ hint to these static-inlines that are used
> by KVM.
> 
> Signed-off-by: James Morse 
> ---
>  arch/arm64/include/asm/cache.h  | 2 +-
>  arch/arm64/include/asm/cacheflush.h | 2 +-
>  arch/arm64/include/asm/cpufeature.h | 8 
>  arch/arm64/include/asm/io.h | 4 ++--
>  4 files changed, 8 insertions(+), 8 deletions(-)

Acked-by: Will Deacon 

It's the right thing to do, but if this stuff keeps trickling in then
we should make CONFIG_OPTIMIZE_INLINING depend on !ARM64 because seeing
"__always_inline" tells you nothing about /why/ it needs to be there and
it's hard to know if/when you can remove those annotations in future.

Will
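
For context, a rough paraphrase (simplified, not the exact upstream text) of
the mechanism behind that suggestion: include/linux/compiler_types.h only
folds the always_inline attribute into plain 'inline' when
CONFIG_OPTIMIZE_INLINING is disabled, so gating the option on !ARM64 would
make every static inline helper behave as __always_inline on arm64 and the
per-function annotations would no longer be needed:

/* Simplified sketch of the 'inline' redefinition in compiler_types.h: */
#if !defined(CONFIG_OPTIMIZE_INLINING)
/* 'inline' really means always inline */
#define inline	inline __attribute__((__always_inline__)) __gnu_inline notrace
#else
/* 'inline' is only a hint; the compiler may emit an out-of-line copy */
#define inline	inline __gnu_inline notrace
#endif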


[PATCH 3/3] arm64: Ask the compiler to __always_inline functions used by KVM at HYP

2020-02-20 Thread James Morse
KVM uses some of the static-inline helpers like icache_is_vpipt() from
its HYP code. This assumes the function is inlined so that the code is
mapped to EL2. The compiler may decide not to inline these, and the
out-of-line version may not be in the __hyp_text section.

Add the additional __always_ hint to these static-inlines that are used
by KVM.

Signed-off-by: James Morse 
---
 arch/arm64/include/asm/cache.h  | 2 +-
 arch/arm64/include/asm/cacheflush.h | 2 +-
 arch/arm64/include/asm/cpufeature.h | 8 
 arch/arm64/include/asm/io.h | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 806e9dc2a852..a4d1b5f771f6 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..e6cca3d4acf7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 42ce41eef274..2a746b99e937 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
return (u64)(features << (64 - width - field)) >> (64 - width);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
return val == 0x1;
 }
 
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,7 +575,7 @@ static inline bool system_uses_ttbr0_pan(void)
!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
return IS_ENABLED(CONFIG_ARM64_SVE) &&
cpus_have_const_cap(ARM64_SVE);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4e531f57147d..6facd1308e7c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
-- 
2.24.1
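
To make the failure mode in the commit message concrete, a minimal sketch;
icache_needs_maintenance() and my_hyp_flush() are made up for illustration,
while __hyp_text, icache_is_vpipt() and __flush_icache_all() are the real
kernel pieces involved:

/* A plain static inline helper, free to be emitted out of line: */
static inline bool icache_needs_maintenance(void)
{
	return !icache_is_vpipt();
}

/* A hyp routine, placed in the .hyp.text section by __hyp_text: */
static void __hyp_text my_hyp_flush(void)
{
	/*
	 * If the compiler emits icache_needs_maintenance() out of line, the
	 * copy lands in normal kernel .text, which is not mapped at EL2, and
	 * the call faults when this runs at HYP. Marking the helpers
	 * __always_inline keeps their bodies inside .hyp.text.
	 */
	if (icache_needs_maintenance())
		__flush_icache_all();
}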
