Test that
+ memory failure handling results in unmapping of bad memory from stage
  2 page tables, hence requiring faulting on next guest access
+ when the guest tries to fault a poisoned page from guest_memfd, the
  userspace VMM is informed with EHWPOISON

Co-developed-by: Ackerley Tng <[email protected]>
Signed-off-by: Ackerley Tng <[email protected]>
Signed-off-by: Lisa Wang <[email protected]>
---
 tools/testing/selftests/kvm/guest_memfd_test.c | 70 +++++++++++++++++++++++++-
 1 file changed, 69 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 0ea4e7d7e6d5..6615ee8bfcd8 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -26,6 +26,7 @@
 #include "ucall_common.h"
 
 static size_t page_size;
+static uint64_t test_memory_failure_guest_gpa;
 
 static void test_file_read_write(int fd, size_t total_size)
 {
@@ -637,6 +638,73 @@ static void test_guest_memfd_guest(void)
        kvm_vm_free(vm);
 }
 
+static void __guest_code_read(void)
+{
+       uint8_t *mem = (uint8_t *)test_memory_failure_guest_gpa;
+
+       READ_ONCE(*mem);
+       GUEST_SYNC(0);
+       READ_ONCE(*mem);
+       GUEST_DONE();
+}
+
+static void guest_read(struct kvm_vcpu *vcpu, int expected_errno)
+{
+       if (expected_errno) {
+               TEST_ASSERT_EQ(_vcpu_run(vcpu), -1);
+               TEST_ASSERT_EQ(errno, expected_errno);
+       } else {
+               vcpu_run(vcpu);
+               TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+       }
+}
+
+static void test_memory_failure_guest(void)
+{
+       const uint64_t gpa = SZ_4G;
+       const int slot = 1;
+
+       unsigned long memory_failure_pfn;
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       uint8_t *mem;
+       size_t size;
+       int fd;
+
+       if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
+               return;
+
+       vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1,
+                                            __guest_code_read);
+
+       size = vm->page_size;
+       fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
+                                            GUEST_MEMFD_FLAG_INIT_SHARED);
+       vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size,
+                                  NULL, fd, 0);
+
+       mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       TEST_ASSERT(mem != MAP_FAILED,
+                   "mmap() for guest_memfd should succeed.");
+       virt_pg_map(vm, gpa, gpa);
+
+       test_memory_failure_guest_gpa = gpa;
+       sync_global_to_guest(vm, test_memory_failure_guest_gpa);
+
+       /* Fault in page to read pfn, then unmap page for testing. */
+       READ_ONCE(*mem);
+       memory_failure_pfn = addr_to_pfn(mem);
+       munmap(mem, size);
+
+       /* Fault page into stage2 page tables. */
+       guest_read(vcpu, 0);
+
+       mark_memory_failure(memory_failure_pfn, 0);
+
+       guest_read(vcpu, EHWPOISON);
+
+       close(fd);
+       kvm_vm_free(vm);
+
+       unmark_memory_failure(memory_failure_pfn, 0);
+}
+
 int main(int argc, char *argv[])
 {
        unsigned long vm_types, vm_type;
@@ -644,7 +712,6 @@ int main(int argc, char *argv[])
        TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
 
        page_size = getpagesize();
-
        /*
         * Not all architectures support KVM_CAP_VM_TYPES. However, those that
         * support guest_memfd have that support for the default VM type.
@@ -657,4 +724,5 @@ int main(int argc, char *argv[])
                test_guest_memfd(vm_type);
 
        test_guest_memfd_guest();
+       test_memory_failure_guest();
 }

-- 
2.53.0.1213.gd9a14994de-goog


Reply via email to