Implement the service call for configuring a shared structure between a
vCPU and the hypervisor, through which the hypervisor can tell the
guest whether the vCPU is currently running on a physical CPU or has
been preempted. The hypervisor clears the structure's preempted field
when the vCPU is loaded in kvm_arch_vcpu_load() and sets it when the
vCPU is scheduled out in kvm_arch_vcpu_put().
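
The layout of the shared structure comes from pvlock-abi.h, which is
introduced by an earlier patch in this series; a sketch of the assumed
layout (the 56-byte pad keeps the structure 64 bytes in size):

  struct pvlock_vcpu_state {
          __le64 preempted;
          /* Structure must be 64 byte aligned, pad to that size */
          u8 padding[56];
  } __packed;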

Signed-off-by: Zengruan Ye <yezengr...@huawei.com>
---
 arch/arm/include/asm/kvm_host.h   | 18 +++++++++++++
 arch/arm64/include/asm/kvm_host.h | 18 +++++++++++++
 arch/arm64/kvm/Makefile           |  1 +
 virt/kvm/arm/arm.c                |  8 ++++++
 virt/kvm/arm/hypercalls.c         | 17 ++++++++++++
 virt/kvm/arm/pvlock.c             | 45 +++++++++++++++++++++++++++++++
 6 files changed, 107 insertions(+)
 create mode 100644 virt/kvm/arm/pvlock.c
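
For context, a rough sketch of the guest-side consumer this call is
meant to enable (the actual guest support is a separate patch in this
series; the helper names below are illustrative assumptions, not ABI):

  static DEFINE_PER_CPU(struct pvlock_vcpu_state *, pv_lock_state);

  static bool pv_lock_init_cpu(int cpu)
  {
          struct arm_smccc_res res;
          struct pvlock_vcpu_state *st;

          /* Ask the hypervisor where this vCPU's structure lives. */
          arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID,
                               KVM_PV_LOCK_PREEMPTED, &res);
          if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
                  return false;

          /* Map the IPA that kvm_init_pvlock() returned on the host. */
          st = memremap(res.a0, sizeof(*st), MEMREMAP_WB);
          if (!st)
                  return false;

          per_cpu(pv_lock_state, cpu) = st;
          return true;
  }

  static bool pv_vcpu_is_preempted(int cpu)
  {
          struct pvlock_vcpu_state *st = per_cpu(pv_lock_state, cpu);

          return st && le64_to_cpu(READ_ONCE(st->preempted));
  }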

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 556cd818eccf..dfeaf9204875 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -356,6 +356,24 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
        return false;
 }
 
+static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
+{
+       return false;
+}
+
+static inline gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu)
+{
+       return GPA_INVALID;
+}
+
+static inline void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
+{
+}
+
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c61260cf63c5..10f8c4bbf97e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -354,6 +354,11 @@ struct kvm_vcpu_arch {
                u64 last_steal;
                gpa_t base;
        } steal;
+
+       /* Guest PV lock state */
+       struct {
+               gpa_t base;
+       } pv;
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -515,6 +520,19 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
        return (vcpu_arch->steal.base != GPA_INVALID);
 }
 
+static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+       vcpu_arch->pv.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
+{
+       return (vcpu_arch->pv.base != GPA_INVALID);
+}
+
+gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu);
+void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted);
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 5ffbdc39e780..e4591f56d5f1 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -15,6 +15,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvlock.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 8de4daf25097..36d57e77d3c4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -383,6 +383,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
        kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
+       kvm_arm_pvlock_preempted_init(&vcpu->arch);
+
        return kvm_vgic_vcpu_init(vcpu);
 }
 
@@ -421,6 +423,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vcpu_set_wfx_traps(vcpu);
 
        vcpu_ptrauth_setup_lazy(vcpu);
+
+       if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+               kvm_update_pvlock_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -434,6 +439,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->cpu = -1;
 
        kvm_arm_set_running_vcpu(NULL);
+
+       if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+               kvm_update_pvlock_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
index bdbab9ef6d2d..7f90b413641c 100644
--- a/virt/kvm/arm/hypercalls.c
+++ b/virt/kvm/arm/hypercalls.c
@@ -72,6 +72,23 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
                val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
+               if (!vcpu_el1_is_32bit(vcpu))
+                       val[0] |= BIT(ARM_SMCCC_KVM_FUNC_PV_LOCK);
+               break;
+       case ARM_SMCCC_VENDOR_HYP_KVM_PV_LOCK_FUNC_ID:
+               if (vcpu_el1_is_32bit(vcpu))
+                       break;
+               option = smccc_get_arg1(vcpu);
+               switch (option) {
+               case KVM_PV_LOCK_FEATURES:
+                       val[0] = SMCCC_RET_SUCCESS;
+                       break;
+               case KVM_PV_LOCK_PREEMPTED:
+                       gpa = kvm_init_pvlock(vcpu);
+                       if (gpa != GPA_INVALID)
+                               val[0] = gpa;
+                       break;
+               }
                break;
        default:
                return kvm_psci_call(vcpu);
diff --git a/virt/kvm/arm/pvlock.c b/virt/kvm/arm/pvlock.c
new file mode 100644
index 000000000000..0644b23be51e
--- /dev/null
+++ b/virt/kvm/arm/pvlock.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengr...@huawei.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/pvlock-abi.h>
+
+#include <kvm/arm_hypercalls.h>
+
+gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu)
+{
+       struct pvlock_vcpu_state init_values = {};
+       struct kvm *kvm = vcpu->kvm;
+       u64 base = vcpu->arch.pv.base;
+       int idx;
+
+       if (base == GPA_INVALID)
+               return base;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
+       srcu_read_unlock(&kvm->srcu, idx);
+
+       return base;
+}
+
+void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
+{
+       int idx;
+       u64 offset;
+       __le64 preempted_le;
+       struct kvm *kvm = vcpu->kvm;
+       u64 base = vcpu->arch.pv.base;
+
+       preempted_le = cpu_to_le64(preempted);
+
+       idx = srcu_read_lock(&kvm->srcu);
+       offset = offsetof(struct pvlock_vcpu_state, preempted);
+       kvm_put_guest(kvm, base + offset, preempted_le, u64);
+       srcu_read_unlock(&kvm->srcu, idx);
+}
-- 
2.19.1

