Modify intercept_exceptions to generic intercepts in vmcb_control_area. Use
the generic vmcb_set_intercept, vmcb_clr_intercept and vmcb_is_intercept to
set/clear/test the intercept_exceptions bits.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/include/asm/svm.h |   22 +++++++++++++++++++++-
 arch/x86/kvm/svm/nested.c  |   12 +++++-------
 arch/x86/kvm/svm/svm.c     |   22 +++++++++++-----------
 arch/x86/kvm/svm/svm.h     |    4 ++--
 4 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index ffc89d8e4fcb..51833a611eba 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -3,6 +3,7 @@
 #define __SVM_H
 
 #include <uapi/asm/svm.h>
+#include <uapi/asm/kvm.h>
 
 /*
  * VMCB Control Area intercept bits starting
@@ -12,6 +13,7 @@
 enum vector_offset {
        CR_VECTOR = 0,
        DR_VECTOR,
+       EXCEPTION_VECTOR,
        MAX_VECTORS,
 };
 
@@ -52,6 +54,25 @@ enum {
        INTERCEPT_DR5_WRITE,
        INTERCEPT_DR6_WRITE,
        INTERCEPT_DR7_WRITE,
+       /* Byte offset 008h (Vector 2) */
+       INTERCEPT_DE_VECTOR = 64 + DE_VECTOR,
+       INTERCEPT_DB_VECTOR = 64 + DB_VECTOR,
+       INTERCEPT_BP_VECTOR = 64 + BP_VECTOR,
+       INTERCEPT_OF_VECTOR = 64 + OF_VECTOR,
+       INTERCEPT_BR_VECTOR = 64 + BR_VECTOR,
+       INTERCEPT_UD_VECTOR = 64 + UD_VECTOR,
+       INTERCEPT_NM_VECTOR = 64 + NM_VECTOR,
+       INTERCEPT_DF_VECTOR = 64 + DF_VECTOR,
+       INTERCEPT_TS_VECTOR = 64 + TS_VECTOR,
+       INTERCEPT_NP_VECTOR = 64 + NP_VECTOR,
+       INTERCEPT_SS_VECTOR = 64 + SS_VECTOR,
+       INTERCEPT_GP_VECTOR = 64 + GP_VECTOR,
+       INTERCEPT_PF_VECTOR = 64 + PF_VECTOR,
+       INTERCEPT_MF_VECTOR = 64 + MF_VECTOR,
+       INTERCEPT_AC_VECTOR = 64 + AC_VECTOR,
+       INTERCEPT_MC_VECTOR = 64 + MC_VECTOR,
+       INTERCEPT_XM_VECTOR = 64 + XM_VECTOR,
+       INTERCEPT_VE_VECTOR = 64 + VE_VECTOR,
 };
 
 enum {
@@ -107,7 +128,6 @@ enum {
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
        u32 intercepts[MAX_VECTORS];
-       u32 intercept_exceptions;
        u64 intercept;
        u8 reserved_1[40];
        u16 pause_filter_thresh;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index ba11fc3bf843..c161bd38f401 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -109,12 +109,11 @@ void recalc_intercepts(struct vcpu_svm *svm)
        h = &svm->nested.hsave->control;
        g = &svm->nested.ctl;
 
-       svm->nested.host_intercept_exceptions = h->intercept_exceptions;
+       svm->nested.host_intercept_exceptions = h->intercepts[EXCEPTION_VECTOR];
 
        for (i = 0; i < MAX_VECTORS; i++)
                c->intercepts[i] = h->intercepts[i];
 
-       c->intercept_exceptions = h->intercept_exceptions;
        c->intercept = h->intercept;
 
        if (g->int_ctl & V_INTR_MASKING_MASK) {
@@ -136,7 +135,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
        for (i = 0; i < MAX_VECTORS; i++)
                c->intercepts[i] |= g->intercepts[i];
 
-       c->intercept_exceptions |= g->intercept_exceptions;
        c->intercept |= g->intercept;
 }
 
@@ -148,7 +146,6 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
        for (i = 0; i < MAX_VECTORS; i++)
                dst->intercepts[i] = from->intercepts[i];
 
-       dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept            = from->intercept;
        dst->iopm_base_pa         = from->iopm_base_pa;
        dst->msrpm_base_pa        = from->msrpm_base_pa;
@@ -495,7 +492,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
        trace_kvm_nested_intercepts(nested_vmcb->control.intercepts[CR_VECTOR] & 0xffff,
                                    nested_vmcb->control.intercepts[CR_VECTOR] >> 16,
-                                   nested_vmcb->control.intercept_exceptions,
+                                   nested_vmcb->control.intercepts[EXCEPTION_VECTOR],
                                    nested_vmcb->control.intercept);
 
        /* Clear internal status */
@@ -835,7 +832,7 @@ static bool nested_exit_on_exception(struct vcpu_svm *svm)
 {
        unsigned int nr = svm->vcpu.arch.exception.nr;
 
-       return (svm->nested.ctl.intercept_exceptions & (1 << nr));
+       return (svm->nested.ctl.intercepts[EXCEPTION_VECTOR] & BIT(nr));
 }
 
 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
@@ -984,7 +981,8 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 
-               if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
+               if (get_host_vmcb(svm)->control.intercepts[EXCEPTION_VECTOR] &
+                               excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_flags)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1a5f3908b388..11892e86cb39 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1003,11 +1003,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        set_dr_intercepts(svm);
 
-       set_exception_intercept(svm, PF_VECTOR);
-       set_exception_intercept(svm, UD_VECTOR);
-       set_exception_intercept(svm, MC_VECTOR);
-       set_exception_intercept(svm, AC_VECTOR);
-       set_exception_intercept(svm, DB_VECTOR);
+       set_exception_intercept(svm, INTERCEPT_PF_VECTOR);
+       set_exception_intercept(svm, INTERCEPT_UD_VECTOR);
+       set_exception_intercept(svm, INTERCEPT_MC_VECTOR);
+       set_exception_intercept(svm, INTERCEPT_AC_VECTOR);
+       set_exception_intercept(svm, INTERCEPT_DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
@@ -1015,7 +1015,7 @@ static void init_vmcb(struct vcpu_svm *svm)
         * as VMware does.
         */
        if (enable_vmware_backdoor)
-               set_exception_intercept(svm, GP_VECTOR);
+               set_exception_intercept(svm, INTERCEPT_GP_VECTOR);
 
        svm_set_intercept(svm, INTERCEPT_INTR);
        svm_set_intercept(svm, INTERCEPT_NMI);
@@ -1093,7 +1093,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                /* Setup VMCB for Nested Paging */
                control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
                svm_clr_intercept(svm, INTERCEPT_INVLPG);
-               clr_exception_intercept(svm, PF_VECTOR);
+               clr_exception_intercept(svm, INTERCEPT_PF_VECTOR);
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
@@ -1135,7 +1135,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        if (sev_guest(svm->vcpu.kvm)) {
                svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-               clr_exception_intercept(svm, UD_VECTOR);
+               clr_exception_intercept(svm, INTERCEPT_UD_VECTOR);
        }
 
        vmcb_mark_all_dirty(svm->vmcb);
@@ -1646,11 +1646,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       clr_exception_intercept(svm, BP_VECTOR);
+       clr_exception_intercept(svm, INTERCEPT_BP_VECTOR);
 
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-                       set_exception_intercept(svm, BP_VECTOR);
+                       set_exception_intercept(svm, INTERCEPT_BP_VECTOR);
        }
 }
 
@@ -2817,7 +2817,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
        pr_err("%-20s%04x\n", "cr_write:", control->intercepts[CR_VECTOR] >> 16);
        pr_err("%-20s%04x\n", "dr_read:", control->intercepts[DR_VECTOR] & 0xffff);
        pr_err("%-20s%04x\n", "dr_write:", control->intercepts[DR_VECTOR] >> 16);
-       pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
+       pr_err("%-20s%08x\n", "exceptions:", control->intercepts[EXCEPTION_VECTOR]);
        pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
        pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
        pr_err("%-20s%d\n", "pause filter threshold:",
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d3b34e0276c5..2fc305f647a3 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -291,7 +291,7 @@ static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
 {
        struct vmcb *vmcb = get_host_vmcb(svm);
 
-       vmcb->control.intercept_exceptions |= (1U << bit);
+       vmcb_set_intercept(&vmcb->control, bit);
 
        recalc_intercepts(svm);
 }
@@ -300,7 +300,7 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
 {
        struct vmcb *vmcb = get_host_vmcb(svm);
 
-       vmcb->control.intercept_exceptions &= ~(1U << bit);
+       vmcb_clr_intercept(&vmcb->control, bit);
 
        recalc_intercepts(svm);
 }

Reply via email to