Hi Praveen,

kernel test robot noticed the following build errors:

[auto build test ERROR on tip/x86/core]
[also build test ERROR on linus/master v6.18-rc1 next-20251014]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    
https://github.com/intel-lab-lkp/linux/commits/Praveen-K-Paladugu/hyperv-Add-definitions-for-MSHV-sleep-state-configuration/20251015-004650
base:   tip/x86/core
patch link:    
https://lore.kernel.org/r/20251014164150.6935-3-prapal%40linux.microsoft.com
patch subject: [PATCH v2 2/2] hyperv: Enable clean shutdown for root partition 
with MSHV
config: i386-buildonly-randconfig-002-20251015 
(https://download.01.org/0day-ci/archive/20251015/[email protected]/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): 
(https://download.01.org/0day-ci/archive/20251015/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: 
https://lore.kernel.org/oe-kbuild-all/[email protected]/

All errors/warnings (new ones prefixed by >>):

   arch/x86/hyperv/hv_init.c: In function 'hyperv_init':
>> arch/x86/hyperv/hv_init.c:556:23: error: implicit declaration of function 
>> 'hv_sleep_notifiers_register'; did you mean 'preempt_notifier_register'? 
>> [-Wimplicit-function-declaration]
     556 |                 (void)hv_sleep_notifiers_register();
         |                       ^~~~~~~~~~~~~~~~~~~~~~~~~~~
         |                       preempt_notifier_register
--
>> drivers/hv/hv_common.c:944:5: warning: no previous prototype for 
>> 'hv_sleep_notifiers_register' [-Wmissing-prototypes]
     944 | int hv_sleep_notifiers_register(void)
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~~


vim +556 arch/x86/hyperv/hv_init.c

   431  
   432  /*
   433   * This function is to be invoked early in the boot sequence after the
   434   * hypervisor has been detected.
   435   *
   436   * 1. Setup the hypercall page.
   437   * 2. Register Hyper-V specific clocksource.
   438   * 3. Setup Hyper-V specific APIC entry points.
   439   */
   440  void __init hyperv_init(void)
   441  {
   442          u64 guest_id;
   443          union hv_x64_msr_hypercall_contents hypercall_msr;
   444          int cpuhp;
   445  
   446          if (x86_hyper_type != X86_HYPER_MS_HYPERV)
   447                  return;
   448  
   449          if (hv_common_init())
   450                  return;
   451  
   452          /*
   453           * The VP assist page is useless to a TDX guest: the only use we
   454           * would have for it is lazy EOI, which can not be used with 
TDX.
   455           */
   456          if (hv_isolation_type_tdx())
   457                  hv_vp_assist_page = NULL;
   458          else
   459                  hv_vp_assist_page = kcalloc(nr_cpu_ids,
   460                                              sizeof(*hv_vp_assist_page),
   461                                              GFP_KERNEL);
   462          if (!hv_vp_assist_page) {
   463                  ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
   464  
   465                  if (!hv_isolation_type_tdx())
   466                          goto common_free;
   467          }
   468  
   469          if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
   470                  /* Negotiate GHCB Version. */
   471                  if (!hv_ghcb_negotiate_protocol())
   472                          hv_ghcb_terminate(SEV_TERM_SET_GEN,
   473                                            GHCB_SEV_ES_PROT_UNSUPPORTED);
   474  
   475                  hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
   476                  if (!hv_ghcb_pg)
   477                          goto free_vp_assist_page;
   478          }
   479  
   480          cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, 
"x86/hyperv_init:online",
   481                                    hv_cpu_init, hv_cpu_die);
   482          if (cpuhp < 0)
   483                  goto free_ghcb_page;
   484  
   485          /*
   486           * Setup the hypercall page and enable hypercalls.
   487           * 1. Register the guest ID
   488           * 2. Enable the hypercall and register the hypercall page
   489           *
   490           * A TDX VM with no paravisor only uses TDX GHCI rather than 
hv_hypercall_pg:
   491           * when the hypercall input is a page, such a VM must pass a 
decrypted
   492           * page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
   493           * hyperv_pcpu_input_arg, which is decrypted if no paravisor is 
present.
   494           *
   495           * A TDX VM with the paravisor uses hv_hypercall_pg for most 
hypercalls,
   496           * which are handled by the paravisor and the VM must use an 
encrypted
   497           * input page: in such a VM, the hyperv_pcpu_input_arg is 
encrypted and
   498           * used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
   499           * hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two 
hypercalls:
   500           * 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and 
_hv_do_fast_hypercall8().
   501           * 2. HVCALL_POST_MESSAGE: the input page must be a decrypted 
page, i.e.
   502           * hv_post_message() in such a VM can't use the encrypted 
hyperv_pcpu_input_arg;
   503           * instead, hv_post_message() uses the post_msg_page, which is 
decrypted
   504           * in such a VM and is only used in such a VM.
   505           */
   506          guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
   507          wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);
   508  
   509          /* With the paravisor, the VM must also write the ID via 
GHCB/GHCI */
   510          hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
   511  
   512          /* A TDX VM with no paravisor only uses TDX GHCI rather than 
hv_hypercall_pg */
   513          if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
   514                  goto skip_hypercall_pg_init;
   515  
   516          hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, 
MODULES_VADDR,
   517                          MODULES_END, GFP_KERNEL, PAGE_KERNEL_ROX,
   518                          VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
   519                          __builtin_return_address(0));
   520          if (hv_hypercall_pg == NULL)
   521                  goto clean_guest_os_id;
   522  
   523          rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
   524          hypercall_msr.enable = 1;
   525  
   526          if (hv_root_partition()) {
   527                  struct page *pg;
   528                  void *src;
   529  
   530                  /*
   531                   * For the root partition, the hypervisor will set up 
its
   532                   * hypercall page. The hypervisor guarantees it will 
not show
   533                   * up in the root's address space. The root can't 
change the
   534                   * location of the hypercall page.
   535                   *
   536                   * Order is important here. We must enable the 
hypercall page
   537                   * so it is populated with code, then copy the code to 
an
   538                   * executable page.
   539                   */
   540                  wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
   541  
   542                  pg = vmalloc_to_page(hv_hypercall_pg);
   543                  src = memremap(hypercall_msr.guest_physical_address << 
PAGE_SHIFT, PAGE_SIZE,
   544                                  MEMREMAP_WB);
   545                  BUG_ON(!src);
   546                  memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
   547                  memunmap(src);
   548  
   549                  hv_remap_tsc_clocksource();
   550                  /*
   551                   * The notifier registration might fail at various hops.
   552                   * Corresponding error messages will land in dmesg. 
There is
   553                   * otherwise nothing that can be specifically done to 
handle
   554                   * failures here.
   555                   */
 > 556                  (void)hv_sleep_notifiers_register();
   557          } else {
   558                  hypercall_msr.guest_physical_address = 
vmalloc_to_pfn(hv_hypercall_pg);
   559                  wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
   560          }
   561  
   562          hv_set_hypercall_pg(hv_hypercall_pg);
   563  
   564  skip_hypercall_pg_init:
   565          /*
   566           * hyperv_init() is called before LAPIC is initialized: see
   567           * apic_intr_mode_init() -> x86_platform.apic_post_init() and
   568           * apic_bsp_setup() -> setup_local_APIC(). The direct-mode 
STIMER
   569           * depends on LAPIC, so hv_stimer_alloc() should be called from
   570           * x86_init.timers.setup_percpu_clockev.
   571           */
   572          old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
   573          x86_init.timers.setup_percpu_clockev = 
hv_stimer_setup_percpu_clockev;
   574  
   575          hv_apic_init();
   576  
   577          x86_init.pci.arch_init = hv_pci_init;
   578  
   579          register_syscore_ops(&hv_syscore_ops);
   580  
   581          if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
   582                  hv_get_partition_id();
   583  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

Reply via email to