Paolo,

This one should not be pulled until we fix KVM, as you said:
https://lore.kernel.org/qemu-devel/d0983ba3-383b-4c81-9cfd-b5b0d26a5...@redhat.com/

BTW, I have sent the KVM fix:
https://lore.kernel.org/all/20250611001018.2179964-1-xiaoyao...@intel.com/

On 6/6/2025 8:34 PM, Paolo Bonzini wrote:
From: Tom Lendacky <thomas.lenda...@amd.com>

A page state change is typically followed by an access of the page(s), which
results in another VMEXIT in order to map the page into the nested page
table. Depending on the size of the page state change request, this can
generate a number of additional VMEXITs. For example, under SNP, when
Linux is using lazy memory acceptance, memory is typically accepted in
4M chunks. A page state change request is submitted to mark the pages as
private, followed by validation of the memory. Since guest_memfd
currently only supports 4K pages, each page validation results in a
VMEXIT to map the page, adding up to 1024 additional exits per 4M chunk.

When performing a page state change, invoke KVM_PRE_FAULT_MEMORY for the
size of the page state change in order to pre-map the pages and avoid the
additional VMEXITs. This helps speed up boot times.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
Link: https://lore.kernel.org/r/f5411c42340bd2f5c14972551edb4e959995e42b.1743193824.git.thomas.lenda...@amd.com
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
  include/system/kvm.h  |  1 +
  accel/kvm/kvm-all.c   |  2 ++
  target/i386/kvm/kvm.c | 31 ++++++++++++++++++++++++++-----
  3 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/include/system/kvm.h b/include/system/kvm.h
index 62ec131d4d8..7cc60d26f24 100644
--- a/include/system/kvm.h
+++ b/include/system/kvm.h
@@ -42,6 +42,7 @@ extern bool kvm_gsi_routing_allowed;
  extern bool kvm_gsi_direct_mapping;
  extern bool kvm_readonly_mem_allowed;
  extern bool kvm_msi_use_devid;
+extern bool kvm_pre_fault_memory_supported;

  #define kvm_enabled() (kvm_allowed)
  /**
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 51526d301b9..a31778341c2 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -99,6 +99,7 @@ bool kvm_allowed;
  bool kvm_readonly_mem_allowed;
  bool kvm_vm_attributes_allowed;
  bool kvm_msi_use_devid;
+bool kvm_pre_fault_memory_supported;
  static bool kvm_has_guest_debug;
  static int kvm_sstep_flags;
  static bool kvm_immediate_exit;
@@ -2745,6 +2746,7 @@ static int kvm_init(MachineState *ms)
          kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
          kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
          (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
+    kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);

      if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
          s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index a6bc089d020..56a6b9b6381 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -6018,9 +6018,11 @@ static bool host_supports_vmx(void)
   * because private/shared page tracking is already provided through other
   * means, these 2 use-cases should be treated as being mutually-exclusive.
   */
-static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
+static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run)
  {
+    struct kvm_pre_fault_memory mem;
      uint64_t gpa, size, attributes;
+    int ret;

      if (!machine_require_guest_memfd(current_machine))
          return -EINVAL;
@@ -6031,13 +6033,32 @@ static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)

      trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);

-    return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+    ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+    if (ret || !kvm_pre_fault_memory_supported) {
+        return ret;
+    }
+
+    /*
+     * Opportunistically pre-fault memory in. Failures are ignored so that any
+     * errors in faulting in the memory will get captured in the KVM
+     * page fault path when the guest first accesses the page.
+     */
+    memset(&mem, 0, sizeof(mem));
+    mem.gpa = gpa;
+    mem.size = size;
+    while (mem.size) {
+        if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) {
+            break;
+        }
+    }
+
+    return 0;
  }

-static int kvm_handle_hypercall(struct kvm_run *run)
+static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run)
  {
      if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
-        return kvm_handle_hc_map_gpa_range(run);
+        return kvm_handle_hc_map_gpa_range(cpu, run);

      return -EINVAL;
  }
@@ -6137,7 +6158,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
          break;
  #endif
      case KVM_EXIT_HYPERCALL:
-        ret = kvm_handle_hypercall(run);
+        ret = kvm_handle_hypercall(cpu, run);
          break;
      case KVM_EXIT_SYSTEM_EVENT:
          switch (run->system_event.type) {
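
One note for anyone reading the loop in kvm_handle_hc_map_gpa_range: it
terminates because KVM_PRE_FAULT_MEMORY updates mem.gpa and mem.size on
partial progress before returning, so each retry resumes where the
previous call left off. Below is a minimal standalone sketch of the same
pattern (my illustration, not part of the patch; it assumes a kernel
exposing KVM_CAP_PRE_FAULT_MEMORY, and pre_fault_range/vcpu_fd are
hypothetical names for an already-created vCPU file descriptor):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Best-effort pre-fault of [gpa, gpa + size). Errors are not fatal:
 * the regular KVM page-fault path will report them when the guest
 * actually touches the page.
 */
static void pre_fault_range(int vcpu_fd, __u64 gpa, __u64 size)
{
    struct kvm_pre_fault_memory mem;

    memset(&mem, 0, sizeof(mem));   /* flags must be zero */
    mem.gpa = gpa;
    mem.size = size;
    while (mem.size) {
        /*
         * On partial progress the kernel advances mem.gpa and shrinks
         * mem.size before copying the struct back to userspace, so a
         * retry resumes where the previous call stopped.
         */
        if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &mem) < 0) {
            break;
        }
    }
}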

