Introduce a kvm_x86_ops interface to activate/deactivate posted interrupts,
and implement the SVM hooks, which toggle AMD IOMMU guest virtual APIC (GA)
mode for all interrupt remapping table entries targeting a given vCPU.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 arch/x86/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/svm.c              | 44 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c              |  6 ++++++
 3 files changed, 54 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dfb7c3d..0d8544b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1183,6 +1183,10 @@ struct kvm_x86_ops {
 
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
+
+       int (*activate_pi_irte)(struct kvm_vcpu *vcpu);
+       int (*deactivate_pi_irte)(struct kvm_vcpu *vcpu);
+
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 
        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6851bce..b674cd0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5374,6 +5374,48 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
        return ret;
 }
 
+/*
+ * Switch every IOMMU interrupt remapping table entry (IRTE) targeting
+ * this vCPU between AMD IOMMU guest virtual APIC (GA) mode and legacy
+ * interrupt remapping mode.
+ *
+ * @vcpu:     vCPU whose per-vCPU ir_list is walked
+ * @activate: true to enable GA mode, false to deactivate it
+ *
+ * Returns 0 on success, or the first non-zero error reported by the
+ * IOMMU driver.  NOTE(review): the walk stops at the first failure, so
+ * on error earlier entries in the list may already have been switched
+ * while later ones were not — confirm callers tolerate this partial
+ * update.
+ */
+static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
+{
+       int ret = 0;
+       unsigned long flags;
+       struct amd_svm_iommu_ir *ir;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       /* Without an assigned device there can be no IRTEs to update. */
+       if (!kvm_arch_has_assigned_device(vcpu->kvm))
+               return 0;
+
+       /*
+        * Here, we go through the per-vcpu ir_list to update all existing
+        * interrupt remapping table entry targeting this vcpu.
+        */
+       spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+       if (list_empty(&svm->ir_list))
+               goto out;
+
+       list_for_each_entry(ir, &svm->ir_list, node) {
+               if (activate)
+                       ret = amd_iommu_activate_guest_mode(ir->data);
+               else
+                       ret = amd_iommu_deactivate_guest_mode(ir->data);
+               /* Bail out on the first IOMMU-reported failure. */
+               if (ret)
+                       break;
+       }
+out:
+       spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+       return ret;
+}
+
+/*
+ * kvm_x86_ops->activate_pi_irte hook: re-enable GA mode on all IRTEs
+ * targeting this vCPU (invoked from kvm_vcpu_activate_apicv).
+ */
+static int svm_activate_pi_irte(struct kvm_vcpu *vcpu)
+{
+       return svm_set_pi_irte_mode(vcpu, true);
+}
+
+/*
+ * kvm_x86_ops->deactivate_pi_irte hook: fall back to legacy interrupt
+ * remapping on all IRTEs targeting this vCPU (invoked from
+ * kvm_vcpu_deactivate_apicv).
+ */
+static int svm_deactivate_pi_irte(struct kvm_vcpu *vcpu)
+{
+       return svm_set_pi_irte_mode(vcpu, false);
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -7269,6 +7311,8 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
        .pmu_ops = &amd_pmu_ops,
        .deliver_posted_interrupt = svm_deliver_avic_intr,
        .update_pi_irte = svm_update_pi_irte,
+       .activate_pi_irte = svm_activate_pi_irte,
+       .deactivate_pi_irte = svm_deactivate_pi_irte,
        .setup_mce = svm_setup_mce,
 
        .smi_allowed = svm_smi_allowed,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 40a20bf..5ab1643 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7177,6 +7177,9 @@ void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu)
        kvm_apic_update_apicv(vcpu);
 
        kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+
+       if (kvm_x86_ops->activate_pi_irte)
+               kvm_x86_ops->activate_pi_irte(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_activate_apicv);
 
@@ -7192,6 +7195,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
        vcpu->arch.apicv_active = false;
        kvm_apic_update_apicv(vcpu);
 
+       if (kvm_x86_ops->deactivate_pi_irte)
+               kvm_x86_ops->deactivate_pi_irte(vcpu);
+
        kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_deactivate_apicv);
-- 
1.8.3.1

Reply via email to