On Mon, Mar 24, 2025, Manali Shukla wrote:
> +     if (vmcb02->save.rip && (svm->nested.ctl.bus_lock_rip == vmcb02->save.rip)) {
> +             vmcb02->control.bus_lock_counter = 1;
> +             svm->bus_lock_rip = svm->nested.ctl.bus_lock_rip;
> +     } else {
> +             vmcb02->control.bus_lock_counter = 0;
> +     }
> +     svm->nested.ctl.bus_lock_rip = INVALID_GPA;
> +
>       /* Done at vmrun: asid.  */
>  
>       /* Also overwritten later if necessary.  */
> @@ -1039,6 +1069,18 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
>  
>       }
>  
> +     /*
> +      * If bus_lock_counter is nonzero and the guest has not moved past the
> +      * guilty instruction, save bus_lock_rip in svm_nested_state. This will
> +      * help determine at nested VMRUN whether to stash vmcb02's counter or
> +      * reset it to '0'.
> +      */
> +     if (vmcb02->control.bus_lock_counter &&
> +         svm->bus_lock_rip == vmcb02->save.rip)
> +             svm->nested.ctl.bus_lock_rip = svm->bus_lock_rip;
> +     else
> +             svm->nested.ctl.bus_lock_rip = INVALID_GPA;
> +
>       nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
>  
>       svm_switch_vmcb(svm, &svm->vmcb01);

...

> +static int bus_lock_exit(struct kvm_vcpu *vcpu)
> +{
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
> +     vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
> +
> +     vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
> +     svm->bus_lock_rip = vcpu->arch.cui_linear_rip;
> +     vcpu->arch.complete_userspace_io = complete_userspace_buslock;
> +
> +     return 0;
> +}

> @@ -327,6 +328,7 @@ struct vcpu_svm {
>  
>       /* Guest GIF value, used when vGIF is not enabled */
>       bool guest_gif;
> +     u64 bus_lock_rip;

I don't think this field is necessary.  Rather than unconditionally invalidate
on nested VMRUN and then conditionally restore on nested #VMEXIT, just leave
svm->nested.ctl.bus_lock_rip set on VMRUN and conditionally invalidate on 
#VMEXIT.
And then in bus_lock_exit(), update the field if the exit occurred while L2 is
active.

Completely untested:

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index a42ef7dd9143..98e065a93516 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -700,13 +700,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
         * L1 re-enters L2, the same instruction will trigger a VM-Exit and the
         * entire cycle start over.
         */
-       if (vmcb02->save.rip && (svm->nested.ctl.bus_lock_rip == vmcb02->save.rip)) {
+       if (vmcb02->save.rip && (svm->nested.ctl.bus_lock_rip == vmcb02->save.rip))
                vmcb02->control.bus_lock_counter = 1;
-               svm->bus_lock_rip = svm->nested.ctl.bus_lock_rip;
-       } else {
+       else
                vmcb02->control.bus_lock_counter = 0;
-       }
-       svm->nested.ctl.bus_lock_rip = INVALID_GPA;
 
        /* Done at vmrun: asid.  */
 
@@ -1070,15 +1067,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        }
 
        /*
-        * If bus_lock_counter is nonzero and the guest has not moved past the
-        * guilty instruction, save bus_lock_rip in svm_nested_state. This will
-        * help determine at nested VMRUN whether to stash vmcb02's counter or
-        * reset it to '0'.
+        * Invalidate bus_lock_rip unless KVM is still waiting for the guest
+        * to make forward progress before re-enabling bus lock detection.
         */
-       if (vmcb02->control.bus_lock_counter &&
-           svm->bus_lock_rip == vmcb02->save.rip)
-               svm->nested.ctl.bus_lock_rip = svm->bus_lock_rip;
-       else
+       if (!vmcb02->control.bus_lock_counter)
                svm->nested.ctl.bus_lock_rip = INVALID_GPA;
 
        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ea12e93ae983..11ce031323fd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3333,9 +3333,10 @@ static int bus_lock_exit(struct kvm_vcpu *vcpu)
        vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
 
        vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
-       svm->bus_lock_rip = vcpu->arch.cui_linear_rip;
        vcpu->arch.complete_userspace_io = complete_userspace_buslock;
 
+       if (is_guest_mode(vcpu))
+               svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip;
        return 0;
 }
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7a4c5848c952..8667faccaedc 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -328,7 +328,6 @@ struct vcpu_svm {
 
        /* Guest GIF value, used when vGIF is not enabled */
        bool guest_gif;
-       u64 bus_lock_rip;
 };
 
 struct svm_cpu_data {


Reply via email to