From: Jack Thomson <[email protected]>
Add kvm_arch_vcpu_pre_fault_memory() for arm64. The implementation hands
off the stage-2 faulting logic to either gmem_abort() or
user_mem_abort().

Add an optional page_size output parameter to user_mem_abort() to
return the VMA page size, which is needed when pre-faulting.

Update the documentation to clarify x86-specific behaviour.
Signed-off-by: Jack Thomson <[email protected]>
---
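[Note for reviewers, not part of the patch: a minimal userspace usage
sketch of the ioctl this enables on arm64. The vcpu_fd and the gpa/size
values are hypothetical; struct kvm_pre_fault_memory and the
retry-on-EINTR/EAGAIN behaviour (the kernel updates gpa/size to the
remaining range) follow the existing KVM_PRE_FAULT_MEMORY contract in
api.rst.]

    #include <err.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void pre_fault_region(int vcpu_fd)
    {
            struct kvm_pre_fault_memory range;

            memset(&range, 0, sizeof(range));
            range.gpa  = 0x80000000;        /* hypothetical guest IPA */
            range.size = 0x200000;          /* 2 MiB */

            /*
             * On EINTR/EAGAIN the kernel has already updated gpa/size
             * to reflect the remaining range, so retrying the ioctl
             * resumes where it left off.
             */
            while (range.size) {
                    if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
                            if (errno == EINTR || errno == EAGAIN)
                                    continue;
                            err(1, "KVM_PRE_FAULT_MEMORY");
                    }
            }
    }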
Documentation/virt/kvm/api.rst | 3 +-
arch/arm64/kvm/Kconfig | 1 +
arch/arm64/kvm/arm.c | 1 +
arch/arm64/kvm/mmu.c | 79 ++++++++++++++++++++++++++++++++--
4 files changed, 79 insertions(+), 5 deletions(-)
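[Note for reviewers: a sketch, under stated assumptions, of why
user_mem_abort() now reports the mapping size back to its caller. The
generic pre-fault loop advances by the hook's return value, so returning
the extent of the block actually installed avoids re-faulting a 2M/1G
mapping PAGE_SIZE at a time. Names mirror the hook added below; the tail
of kvm_arch_vcpu_pre_fault_memory() is elided from this excerpt, so this
is illustrative, not the patch's exact code.]

    ret = user_mem_abort(vcpu, ipa, nested, memslot, &page_size,
                         hva, false);
    if (ret < 0)
            return ret;

    /* Advance to the boundary of the mapping just installed. */
    end = (ipa & ~(page_size - 1)) + page_size;
    return min(range->size, end - range->gpa);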
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 01a3abef8abb..44cfd9e736bb 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6493,7 +6493,8 @@ Errors:
KVM_PRE_FAULT_MEMORY populates KVM's stage-2 page tables used to map memory
for the current vCPU state. KVM maps memory as if the vCPU generated a
stage-2 read page fault, e.g. faults in memory as needed, but doesn't break
-CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed.
+CoW. However, on x86, KVM does not mark any newly created stage-2 PTE as
+Accessed.
In the case of confidential VM types where there is an initial set up of
private guest memory before the guest is 'finalized'/measured, this ioctl
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 4f803fd1c99a..6872aaabe16c 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,6 +25,7 @@ menuconfig KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select KVM_GENERIC_PRE_FAULT_MEMORY
select VIRT_XFER_TO_GUEST_WORK
select KVM_VFIO
select HAVE_KVM_DIRTY_RING_ACQ_REL
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 4f80da0c0d1d..19bac68f737f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -332,6 +332,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_COUNTER_OFFSET:
case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
case KVM_CAP_ARM_SEA_TO_USER:
+ case KVM_CAP_PRE_FAULT_MEMORY:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 48d7c372a4cd..499b131f794e 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1642,8 +1642,8 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, unsigned long hva,
- bool fault_is_perm)
+ struct kvm_memory_slot *memslot, unsigned long *page_size,
+ unsigned long hva, bool fault_is_perm)
{
int ret = 0;
bool topup_memcache;
@@ -1923,6 +1923,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_release_faultin_page(kvm, page, !!ret, writable);
kvm_fault_unlock(kvm);
+ if (page_size)
+ *page_size = vma_pagesize;
+
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret)
mark_page_dirty_in_slot(kvm, memslot, gfn);
@@ -2196,8 +2199,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
esr_fsc_is_permission_fault(esr));
else
- ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, hva,
- esr_fsc_is_permission_fault(esr));
+ ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL,
+ hva, esr_fsc_is_permission_fault(esr));
if (ret == 0)
ret = 1;
out:
@@ -2573,3 +2576,71 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}
+
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range)
+{
+ struct kvm_vcpu_fault_info *fault_info = &vcpu->arch.fault;
+ struct kvm_s2_trans nested_trans, *nested = NULL;
+ unsigned long page_size = PAGE_SIZE;
+ struct kvm_memory_slot *memslot;
+ phys_addr_t ipa = range->gpa;
+ phys_addr_t end;
+ hva_t hva;
+ gfn_t gfn;
+ int ret;
+
+ if (vcpu_is_protected(vcpu))
+ return -EOPNOTSUPP;