From: Naman Jain <[email protected]> Sent: Monday, March 16, 2026 
5:13 AM
> 
> Move mshv_vtl_configure_reg_page() implementation from
> drivers/hv/mshv_vtl_main.c to arch-specific files:
> - arch/x86/hyperv/hv_vtl.c: full implementation with register page setup
> - arch/arm64/hyperv/hv_vtl.c: stub implementation (unsupported)
> 
> Move common type definitions to include/asm-generic/mshyperv.h:
> - struct mshv_vtl_per_cpu
> - union hv_synic_overlay_page_msr
> 
> Move hv_call_get_vp_registers() and hv_call_set_vp_registers()
> declarations to include/asm-generic/mshyperv.h since these functions
> are used by multiple modules.
> 
> While at it, remove the unnecessary stub implementations in #else
> case for mshv_vtl_return* functions in arch/x86/include/asm/mshyperv.h.

This patch seems to be doing multiple things. The reg page configuration
changes are more substantial and should probably be in a patch by
themselves. The other changes are more trivial and are perhaps OK
grouped into a single patch, but you could also consider breaking them
out separately.

> 
> This is essential for adding support for ARM64 in MSHV_VTL.
> 
> Signed-off-by: Naman Jain <[email protected]>
> ---
>  arch/arm64/hyperv/hv_vtl.c        |  8 +++++
>  arch/arm64/include/asm/mshyperv.h |  3 ++
>  arch/x86/hyperv/hv_vtl.c          | 32 ++++++++++++++++++++
>  arch/x86/include/asm/mshyperv.h   |  7 ++---
>  drivers/hv/mshv.h                 |  8 -----
>  drivers/hv/mshv_vtl_main.c        | 49 +++----------------------------
>  include/asm-generic/mshyperv.h    | 42 ++++++++++++++++++++++++++
>  7 files changed, 92 insertions(+), 57 deletions(-)
> 
> diff --git a/arch/arm64/hyperv/hv_vtl.c b/arch/arm64/hyperv/hv_vtl.c
> index 66318672c242..d699138427c1 100644
> --- a/arch/arm64/hyperv/hv_vtl.c
> +++ b/arch/arm64/hyperv/hv_vtl.c
> @@ -10,6 +10,7 @@
>  #include <asm/boot.h>
>  #include <asm/mshyperv.h>
>  #include <asm/cpu_ops.h>
> +#include <linux/export.h>
> 
>  void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0)
>  {
> @@ -142,3 +143,10 @@ void mshv_vtl_return_call(struct mshv_vtl_cpu_context 
> *vtl0)
>               "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
>  }
>  EXPORT_SYMBOL(mshv_vtl_return_call);
> +
> +bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu)
> +{
> +     pr_debug("Register page not supported on ARM64\n");
> +     return false;
> +}
> +EXPORT_SYMBOL_GPL(hv_vtl_configure_reg_page);
> diff --git a/arch/arm64/include/asm/mshyperv.h
> b/arch/arm64/include/asm/mshyperv.h
> index de7f3a41a8ea..36803f0386cc 100644
> --- a/arch/arm64/include/asm/mshyperv.h
> +++ b/arch/arm64/include/asm/mshyperv.h
> @@ -61,6 +61,8 @@ static inline u64 hv_get_non_nested_msr(unsigned int reg)
>                               ARM_SMCCC_OWNER_VENDOR_HYP,     \
>                               HV_SMCCC_FUNC_NUMBER)
> 
> +struct mshv_vtl_per_cpu;
> +
>  struct mshv_vtl_cpu_context {
>  /*
>   * NOTE: x18 is managed by the hypervisor. It won't be reloaded from this 
> array.
> @@ -82,6 +84,7 @@ static inline int hv_vtl_get_set_reg(struct 
> hv_register_assoc *regs,
> bool set, u
>  }
> 
>  void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
> +bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu);

I think this declaration could be added in asm-generic/mshyperv.h so that it
is shared by x86 and arm64. That would also obviate the need for the forward
reference to struct mshv_vtl_per_cpu that you've added here.

>  #endif
> 
>  #include <asm-generic/mshyperv.h>
> diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
> index 72a0bb4ae0c7..ede290985d41 100644
> --- a/arch/x86/hyperv/hv_vtl.c
> +++ b/arch/x86/hyperv/hv_vtl.c
> @@ -20,6 +20,7 @@
>  #include <uapi/asm/mtrr.h>
>  #include <asm/debugreg.h>
>  #include <linux/export.h>
> +#include <linux/hyperv.h>
>  #include <../kernel/smpboot.h>
>  #include "../../kernel/fpu/legacy.h"
> 
> @@ -259,6 +260,37 @@ int __init hv_vtl_early_init(void)
>       return 0;
>  }
> 
> +static const union hv_input_vtl input_vtl_zero;
> +
> +bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu)
> +{
> +     struct hv_register_assoc reg_assoc = {};
> +     union hv_synic_overlay_page_msr overlay = {};
> +     struct page *reg_page;
> +
> +     reg_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
> +     if (!reg_page) {
> +             WARN(1, "failed to allocate register page\n");
> +             return false;
> +     }
> +
> +     overlay.enabled = 1;
> +     overlay.pfn = page_to_hvpfn(reg_page);
> +     reg_assoc.name = HV_X64_REGISTER_REG_PAGE;
> +     reg_assoc.value.reg64 = overlay.as_uint64;
> +
> +     if (hv_call_set_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
> +                                  1, input_vtl_zero, &reg_assoc)) {
> +             WARN(1, "failed to setup register page\n");
> +             __free_page(reg_page);
> +             return false;
> +     }
> +
> +     per_cpu->reg_page = reg_page;
> +     return true;

As Sashiko AI noted, the memory allocated for the reg_page never gets freed.

> +}
> +EXPORT_SYMBOL_GPL(hv_vtl_configure_reg_page);
> +
>  DEFINE_STATIC_CALL_NULL(__mshv_vtl_return_hypercall, void (*)(void));
> 
>  void mshv_vtl_return_call_init(u64 vtl_return_offset)
> diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
> index d5355a5b7517..d592fea49cdb 100644
> --- a/arch/x86/include/asm/mshyperv.h
> +++ b/arch/x86/include/asm/mshyperv.h
> @@ -271,6 +271,8 @@ static inline u64 hv_get_non_nested_msr(unsigned int reg) 
> {
> return 0; }
>  static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
>  #endif /* CONFIG_HYPERV */
> 
> +struct mshv_vtl_per_cpu;
> +
>  struct mshv_vtl_cpu_context {
>       union {
>               struct {
> @@ -305,13 +307,10 @@ void mshv_vtl_return_call_init(u64 vtl_return_offset);
>  void mshv_vtl_return_hypercall(void);
>  void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
>  int hv_vtl_get_set_reg(struct hv_register_assoc *regs, bool set, u64 shared);
> +bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu);

Same as for arm64. Add a shared declaration in asm-generic/mshyperv.h.

>  #else
>  static inline void __init hv_vtl_init_platform(void) {}
>  static inline int __init hv_vtl_early_init(void) { return 0; }
> -static inline void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
> -static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
> -static inline void mshv_vtl_return_hypercall(void) {}
> -static inline void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) 
> {}
>  #endif
> 
>  #include <asm-generic/mshyperv.h>
> diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
> index d4813df92b9c..0fcb7f9ba6a9 100644
> --- a/drivers/hv/mshv.h
> +++ b/drivers/hv/mshv.h
> @@ -14,14 +14,6 @@
>       memchr_inv(&((STRUCT).MEMBER), \
>                  0, sizeof_field(typeof(STRUCT), MEMBER))
> 
> -int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
> -                          union hv_input_vtl input_vtl,
> -                          struct hv_register_assoc *registers);
> -
> -int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
> -                          union hv_input_vtl input_vtl,
> -                          struct hv_register_assoc *registers);
> -
>  int hv_call_get_partition_property(u64 partition_id, u64 property_code,
>                                  u64 *property_value);
> 
> diff --git a/drivers/hv/mshv_vtl_main.c b/drivers/hv/mshv_vtl_main.c
> index 91517b45d526..c79d24317b8e 100644
> --- a/drivers/hv/mshv_vtl_main.c
> +++ b/drivers/hv/mshv_vtl_main.c
> @@ -78,21 +78,6 @@ struct mshv_vtl {
>       u64 id;
>  };
> 
> -struct mshv_vtl_per_cpu {
> -     struct mshv_vtl_run *run;
> -     struct page *reg_page;
> -};
> -
> -/* SYNIC_OVERLAY_PAGE_MSR - internal, identical to hv_synic_simp */
> -union hv_synic_overlay_page_msr {
> -     u64 as_uint64;
> -     struct {
> -             u64 enabled: 1;
> -             u64 reserved: 11;
> -             u64 pfn: 52;
> -     } __packed;
> -};
> -
>  static struct mutex mshv_vtl_poll_file_lock;
>  static union hv_register_vsm_page_offsets mshv_vsm_page_offsets;
>  static union hv_register_vsm_capabilities mshv_vsm_capabilities;
> @@ -201,34 +186,6 @@ static struct page *mshv_vtl_cpu_reg_page(int cpu)
>       return *per_cpu_ptr(&mshv_vtl_per_cpu.reg_page, cpu);
>  }
> 
> -static void mshv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu)
> -{
> -     struct hv_register_assoc reg_assoc = {};
> -     union hv_synic_overlay_page_msr overlay = {};
> -     struct page *reg_page;
> -
> -     reg_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
> -     if (!reg_page) {
> -             WARN(1, "failed to allocate register page\n");
> -             return;
> -     }
> -
> -     overlay.enabled = 1;
> -     overlay.pfn = page_to_hvpfn(reg_page);
> -     reg_assoc.name = HV_X64_REGISTER_REG_PAGE;
> -     reg_assoc.value.reg64 = overlay.as_uint64;
> -
> -     if (hv_call_set_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
> -                                  1, input_vtl_zero, &reg_assoc)) {
> -             WARN(1, "failed to setup register page\n");
> -             __free_page(reg_page);
> -             return;
> -     }
> -
> -     per_cpu->reg_page = reg_page;
> -     mshv_has_reg_page = true;
> -}
> -
>  static void mshv_vtl_synic_enable_regs(unsigned int cpu)
>  {
>       union hv_synic_sint sint;
> @@ -329,8 +286,10 @@ static int mshv_vtl_alloc_context(unsigned int cpu)
>       if (!per_cpu->run)
>               return -ENOMEM;
> 
> -     if (mshv_vsm_capabilities.intercept_page_available)
> -             mshv_vtl_configure_reg_page(per_cpu);
> +     if (mshv_vsm_capabilities.intercept_page_available) {
> +             if (hv_vtl_configure_reg_page(per_cpu))
> +                     mshv_has_reg_page = true;

As Sashiko AI noted, using the global mshv_has_reg_page to indicate the
success of configuring the reg page doesn't work, because configuring the
reg page is a per-cpu operation. But this bug existed before this patch
set, so perhaps it should be fixed in a preliminary patch.

> +     }
> 
>       mshv_vtl_synic_enable_regs(cpu);
> 
> diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
> index b147a12085e4..b53fcc071596 100644
> --- a/include/asm-generic/mshyperv.h
> +++ b/include/asm-generic/mshyperv.h
> @@ -383,8 +383,50 @@ static inline int hv_deposit_memory(u64 partition_id, 
> u64 status)
>       return hv_deposit_memory_node(NUMA_NO_NODE, partition_id, status);
>  }
> 
> +#if IS_ENABLED(CONFIG_MSHV_ROOT) || IS_ENABLED(CONFIG_MSHV_VTL)
> +int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
> +                          union hv_input_vtl input_vtl,
> +                          struct hv_register_assoc *registers);
> +
> +int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
> +                          union hv_input_vtl input_vtl,
> +                          struct hv_register_assoc *registers);
> +#else
> +static inline int hv_call_get_vp_registers(u32 vp_index, u64 partition_id,
> +                                        u16 count,
> +                                        union hv_input_vtl input_vtl,
> +                                        struct hv_register_assoc *registers)
> +{
> +     return -EOPNOTSUPP;
> +}
> +
> +static inline int hv_call_set_vp_registers(u32 vp_index, u64 partition_id,
> +                                        u16 count,
> +                                        union hv_input_vtl input_vtl,
> +                                        struct hv_register_assoc *registers)
> +{
> +     return -EOPNOTSUPP;
> +}
> +#endif /* CONFIG_MSHV_ROOT || CONFIG_MSHV_VTL */
> +
>  #define HV_VP_ASSIST_PAGE_ADDRESS_SHIFT      12
> +
>  #if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
> +struct mshv_vtl_per_cpu {
> +     struct mshv_vtl_run *run;
> +     struct page *reg_page;
> +};
> +
> +/* SYNIC_OVERLAY_PAGE_MSR - internal, identical to hv_synic_simp */
> +union hv_synic_overlay_page_msr {
> +     u64 as_uint64;
> +     struct {
> +             u64 enabled: 1;
> +             u64 reserved: 11;
> +             u64 pfn: 52;
> +     } __packed;
> +};
> +
>  u8 __init get_vtl(void);
>  #else
>  static inline u8 get_vtl(void) { return 0; }
> --
> 2.43.0
> 

Sashiko AI noted another existing bug in mshv_vtl_init(): the error path
calls kfree(mem_dev) when it should call put_device(mem_dev). See the
comment in the header of device_initialize().


Reply via email to