On Fri, Feb 06, 2026 at 07:47:54AM +0100, Jan Kiszka wrote:
> From: Jan Kiszka <[email protected]>
> 
> Resolves the following lockdep report when booting PREEMPT_RT on Hyper-V
> with related guest support enabled:
> 
> [    1.127941] hv_vmbus: registering driver hyperv_drm
> 
> [    1.132518] =============================
> [    1.132519] [ BUG: Invalid wait context ]
> [    1.132521] 6.19.0-rc8+ #9 Not tainted
> [    1.132524] -----------------------------
> [    1.132525] swapper/0/0 is trying to lock:
> [    1.132526] ffff8b9381bb3c90 (&channel->sched_lock){....}-{3:3}, at: vmbus_chan_sched+0xc4/0x2b0
> [    1.132543] other info that might help us debug this:
> [    1.132544] context-{2:2}
> [    1.132545] 1 lock held by swapper/0/0:
> [    1.132547]  #0: ffffffffa010c4c0 (rcu_read_lock){....}-{1:3}, at: vmbus_chan_sched+0x31/0x2b0
> [    1.132557] stack backtrace:
> [    1.132560] CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.19.0-rc8+ #9 PREEMPT_{RT,(lazy)}
> [    1.132565] Hardware name: Microsoft Corporation Virtual Machine/Virtual Machine, BIOS Hyper-V UEFI Release v4.1 09/25/2025
> [    1.132567] Call Trace:
> [    1.132570]  <IRQ>
> [    1.132573]  dump_stack_lvl+0x6e/0xa0
> [    1.132581]  __lock_acquire+0xee0/0x21b0
> [    1.132592]  lock_acquire+0xd5/0x2d0
> [    1.132598]  ? vmbus_chan_sched+0xc4/0x2b0
> [    1.132606]  ? lock_acquire+0xd5/0x2d0
> [    1.132613]  ? vmbus_chan_sched+0x31/0x2b0
> [    1.132619]  rt_spin_lock+0x3f/0x1f0
> [    1.132623]  ? vmbus_chan_sched+0xc4/0x2b0
> [    1.132629]  ? vmbus_chan_sched+0x31/0x2b0
> [    1.132634]  vmbus_chan_sched+0xc4/0x2b0
> [    1.132641]  vmbus_isr+0x2c/0x150
> [    1.132648]  __sysvec_hyperv_callback+0x5f/0xa0
> [    1.132654]  sysvec_hyperv_callback+0x88/0xb0
> [    1.132658]  </IRQ>
> [    1.132659]  <TASK>
> [    1.132660]  asm_sysvec_hyperv_callback+0x1a/0x20
> 
> As the code paths that handle vmbus IRQs use sleeping locks under
> PREEMPT_RT, the complete vmbus_handler execution needs to be moved into
> thread context. Open-coding this allows us to skip the IPI that irq_work
> would additionally bring and which we do not need, since we are called
> from an IRQ, never from an NMI.
> 
> Signed-off-by: Jan Kiszka <[email protected]>

First I would like to share my opinion that, although support for the
RT kernel is not on the near-term roadmap, we should welcome RT Linux
patches.

Coming back to this patch, I can reproduce the stack trace referenced
in the commit when running with PREEMPT_RT enabled, and I have verified
that this patch resolves the issue. Next, I observed the storage-related
stack trace mentioned in Jan’s other patch; applying the storvsc patch
fixed that as well.

However, when testing without PREEMPT_RT enabled, I see another lockdep
warning, included below, both with and without Jan's patches. I wanted to
check whether it is possible to address that issue as part of the same fix;
doing so would make the change useful beyond PREEMPT_RT as well.
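
On the mechanism itself, open-coding per-CPU threads instead of using
irq_work looks right to me: irq_work_queue() raises a self-IPI before the
work item can run, while here a plain wake_up_process() from the IRQ is
enough. Just for comparison (a hypothetical sketch, names illustrative and
not part of this patch), an irq_work-based deferral would look roughly
like this:

#include <linux/irq_work.h>

static void vmbus_irq_work_fn(struct irq_work *work)
{
	if (vmbus_handler)
		vmbus_handler();
}

static DEFINE_PER_CPU(struct irq_work, vmbus_irq_work) =
	IRQ_WORK_INIT(vmbus_irq_work_fn);

/* in DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback): */
	if (vmbus_handler)
		irq_work_queue(this_cpu_ptr(&vmbus_irq_work));

On PREEMPT_RT this would still end up in thread context (plain irq_work
items are handed to the per-CPU irq_work kthread there), but only after
the extra self-IPI that this patch avoids.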


> ---
> 
> Changes in v2:
>  - reorder vmbus_irq_pending clearing to fix a race condition
> 
>  arch/x86/kernel/cpu/mshyperv.c | 52 ++++++++++++++++++++++++++++++++--
>  1 file changed, 50 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
> index 579fb2c64cfd..b39cb983326a 100644
> --- a/arch/x86/kernel/cpu/mshyperv.c
> +++ b/arch/x86/kernel/cpu/mshyperv.c
> @@ -17,6 +17,7 @@
>  #include <linux/irq.h>
>  #include <linux/kexec.h>
>  #include <linux/random.h>
> +#include <linux/smpboot.h>
>  #include <asm/processor.h>
>  #include <asm/hypervisor.h>
>  #include <hyperv/hvhdk.h>
> @@ -150,6 +151,43 @@ static void (*hv_stimer0_handler)(void);
>  static void (*hv_kexec_handler)(void);
>  static void (*hv_crash_handler)(struct pt_regs *regs);
>  
> +static DEFINE_PER_CPU(bool, vmbus_irq_pending);
> +static DEFINE_PER_CPU(struct task_struct *, vmbus_irqd);
> +
> +static void vmbus_irqd_wake(void)
> +{
> +     struct task_struct *tsk = __this_cpu_read(vmbus_irqd);
> +
> +     __this_cpu_write(vmbus_irq_pending, true);
> +     wake_up_process(tsk);
> +}
> +
> +static void vmbus_irqd_setup(unsigned int cpu)
> +{
> +     sched_set_fifo(current);
> +}
> +
> +static int vmbus_irqd_should_run(unsigned int cpu)
> +{
> +     return __this_cpu_read(vmbus_irq_pending);
> +}
> +
> +static void run_vmbus_irqd(unsigned int cpu)
> +{
> +     __this_cpu_write(vmbus_irq_pending, false);
> +     vmbus_handler();
> +}
> +
> +static bool vmbus_irq_initialized;
> +
> +static struct smp_hotplug_thread vmbus_irq_threads = {
> +     .store                  = &vmbus_irqd,
> +     .setup                  = vmbus_irqd_setup,
> +     .thread_should_run      = vmbus_irqd_should_run,
> +     .thread_fn              = run_vmbus_irqd,
> +     .thread_comm            = "vmbus_irq/%u",
> +};
> +
>  DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
>  {
>       struct pt_regs *old_regs = set_irq_regs(regs);
> @@ -158,8 +196,12 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
>       if (mshv_handler)
>               mshv_handler();
>  
> -     if (vmbus_handler)
> -             vmbus_handler();
> +     if (vmbus_handler) {
> +             if (IS_ENABLED(CONFIG_PREEMPT_RT))
> +                     vmbus_irqd_wake();
> +             else
> +                     vmbus_handler();
> +     }
>  
>       if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
>               apic_eoi();
> @@ -174,6 +216,10 @@ void hv_setup_mshv_handler(void (*handler)(void))
>  
>  void hv_setup_vmbus_handler(void (*handler)(void))
>  {
> +     if (IS_ENABLED(CONFIG_PREEMPT_RT) && !vmbus_irq_initialized) {
> +             BUG_ON(smpboot_register_percpu_thread(&vmbus_irq_threads));
> +             vmbus_irq_initialized = true;
> +     }
>       vmbus_handler = handler;
>  }
>  
> @@ -181,6 +227,8 @@ void hv_remove_vmbus_handler(void)
>  {
>       /* We have no way to deallocate the interrupt gate */
>       vmbus_handler = NULL;
> +     smpboot_unregister_percpu_thread(&vmbus_irq_threads);

Do we want to guard this call so it is only made when vmbus_irq_initialized
is true?
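
Something like this is what I have in mind (only a sketch on top of this
patch, to illustrate the guard):

void hv_remove_vmbus_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	vmbus_handler = NULL;

	/* Only tear down the per-CPU threads if they were registered */
	if (vmbus_irq_initialized) {
		smpboot_unregister_percpu_thread(&vmbus_irq_threads);
		vmbus_irq_initialized = false;
	}
}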

- Saurabh

> +     vmbus_irq_initialized = false;
>  }
>  
>  /*
> -- 
> 2.51.0
