On 9/16/25 12:32 PM, Grygorii Strashko wrote:
From: Grygorii Strashko<grygorii_stras...@epam.com>

Since commit b99227347230 ("x86: Fix AMD_SVM and INTEL_VMX dependency") the
AMD-V support can be gracefully disabled, but it still keeps SVM
code partially built-in, because HVM code uses a mix of:

- "cpu_has_svm" macro, which doesn't account for CONFIG_AMD_SVM cfg
- "using_svm()" function, which accounts for CONFIG_AMD_SVM cfg

for runtime SVM availability checking. As a result, compiler DCE can't remove
all of the unreachable SVM code.

Fix it by sticking to the "cpu_has_svm" macro only, which is updated to
account for the CONFIG_AMD_SVM config option.

Signed-off-by: Grygorii Strashko<grygorii_stras...@epam.com>
---
Hi

It could be good to have it in 4.21.

bloat-o-meter:
add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-98 (-98)
Function                                     old     new   delta
guest_flush_tlb_flags                         71      62      -9
init_speculation_mitigations               10024   10011     -13
hvm_set_efer                                 364     288     -76
Total: Before=3656835, After=3656737, chg -0.00%

It doesn't seem critical for the current release stage, so let's consider these
changes for 4.22.

Thanks.

~ Oleksii


  xen/arch/x86/domain.c                 | 4 ++--
  xen/arch/x86/hvm/hvm.c                | 2 +-
  xen/arch/x86/hvm/nestedhvm.c          | 2 +-
  xen/arch/x86/include/asm/cpufeature.h | 3 ++-
  xen/arch/x86/include/asm/hvm/hvm.h    | 5 -----
  5 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 19fd86ce88d2..92661527eb75 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1824,7 +1824,7 @@ static void load_segments(struct vcpu *n)
          if ( !(n->arch.flags & TF_kernel_mode) )
              SWAP(gsb, gss);
-        if ( using_svm() && (n->arch.pv.fs | n->arch.pv.gs) <= 3 )
+        if ( cpu_has_svm && (n->arch.pv.fs | n->arch.pv.gs) <= 3 )
              fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
                                         n->arch.pv.fs_base, gsb, gss);
      }
@@ -2142,7 +2142,7 @@ static void __context_switch(void)
#ifdef CONFIG_PV
      /* Prefetch the VMCB if we expect to use it later in the context switch */
-    if ( using_svm() && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
+    if ( cpu_has_svm && is_pv_64bit_domain(nd) && !is_idle_domain(nd) )
          svm_load_segs_prefetch();
  #endif
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 57d09e02ed0f..330103ddf386 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -160,7 +160,7 @@ static int __init cf_check hvm_enable(void)
if ( cpu_has_vmx )
          fns = start_vmx();
-    else if ( using_svm() )
+    else if ( cpu_has_svm )
          fns = start_svm();
if ( fns )
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index c6329ba2e51a..d895a738448c 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -157,7 +157,7 @@ static int __init cf_check nestedhvm_setup(void)
       */
      if ( cpu_has_vmx )
          start_nested_vmx(&hvm_funcs);
-    else if ( using_svm() )
+    else if ( cpu_has_svm )
          start_nested_svm(&hvm_funcs);
return 0;
diff --git a/xen/arch/x86/include/asm/cpufeature.h b/xen/arch/x86/include/asm/cpufeature.h
index f42e95586966..ce7dc1ddad0a 100644
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -165,7 +165,8 @@ static inline bool boot_cpu_has(unsigned int feat)
/* CPUID level 0x80000001.ecx */
  #define cpu_has_cmp_legacy      boot_cpu_has(X86_FEATURE_CMP_LEGACY)
-#define cpu_has_svm             boot_cpu_has(X86_FEATURE_SVM)
+#define cpu_has_svm             (IS_ENABLED(CONFIG_AMD_SVM) && \
+                                 boot_cpu_has(X86_FEATURE_SVM))
  #define cpu_has_sse4a           boot_cpu_has(X86_FEATURE_SSE4A)
  #define cpu_has_xop             boot_cpu_has(X86_FEATURE_XOP)
  #define cpu_has_skinit          boot_cpu_has(X86_FEATURE_SKINIT)
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 0fa9e3c21598..24a7ed88567b 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -383,11 +383,6 @@ int hvm_copy_context_and_params(struct domain *dst, struct domain *src);

 int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);

-static inline bool using_svm(void)
-{
-    return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
-}
-
  #ifdef CONFIG_HVM
#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

Reply via email to