Add a selftest to verify that a memory region backed by a guest_memfd
can be used as private guest memory. This is a key use case for
confidential computing guests where the host should not have access to the
guest's memory contents.

The new test, test_guest_private_mem, creates a protected VM, maps a
guest_memfd into the guest's address space, and then marks the region as
private. The guest code then writes to and reads from this private memory
region to verify it is accessible.

To better distinguish between the test cases, rename the existing test
that verifies shared host/guest access from test_guest_memfd_guest to
test_guest_shared_mem.

Signed-off-by: Ackerley Tng <[email protected]>
---
 .../testing/selftests/kvm/guest_memfd_test.c  | 57 +++++++++++++++++--
 1 file changed, 53 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 618c937f3c90..ecb0cbcacbec 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -406,7 +406,7 @@ static void test_guest_memfd(unsigned long vm_type)
        kvm_vm_free(vm);
 }
 
-static void guest_code(uint8_t *mem, uint64_t size)
+static void guest_code_test_guest_shared_mem(uint8_t *mem, uint64_t size)
 {
        size_t i;
 
@@ -418,7 +418,7 @@ static void guest_code(uint8_t *mem, uint64_t size)
        GUEST_DONE();
 }
 
-static void test_guest_memfd_guest(void)
+static void test_guest_shared_mem(void)
 {
        /*
         * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
@@ -437,7 +437,8 @@ static void test_guest_memfd_guest(void)
        if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
                return;
 
-       vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);
+       vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1,
+                                            guest_code_test_guest_shared_mem);
 
        TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
                    "Default VM type should support MMAP, supported flags = 0x%x",
@@ -469,6 +470,53 @@ static void test_guest_memfd_guest(void)
        kvm_vm_free(vm);
 }
 
+static void guest_code_test_guest_private_mem(uint8_t *mem)
+{
+       WRITE_ONCE(mem[0], 0xff);
+       GUEST_ASSERT_EQ(READ_ONCE(mem[0]), 0xff);
+
+       GUEST_DONE();
+}
+
+static void test_guest_private_mem(void)
+{
+       const struct vm_shape shape = {
+               .mode = VM_MODE_DEFAULT,
+               .type = KVM_X86_SW_PROTECTED_VM,
+       };
+       /*
+        * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
+        * the guest's code, stack, and page tables, and low memory contains
+        * the PCI hole and other MMIO regions that need to be avoided.
+        */
+       const uint64_t gpa = SZ_4G;
+       const int slot = 1;
+
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       size_t npages;
+       int fd;
+
+       npages = page_size / getpagesize();
+       vm = __vm_create_shape_with_one_vcpu(shape, &vcpu, npages,
+                                            guest_code_test_guest_private_mem);
+
+       fd = vm_create_guest_memfd(vm, page_size, 0);
+       vm_mem_add(vm, VM_MEM_SRC_SHMEM, gpa, slot, npages, KVM_MEM_GUEST_MEMFD,
+                  fd, 0, 0);
+
+       virt_map(vm, gpa, gpa, npages);
+       vm_mem_set_private(vm, gpa, page_size);
+
+       vcpu_args_set(vcpu, 1, gpa);
+       vcpu_run(vcpu);
+
+       TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+
+       close(fd);
+       kvm_vm_free(vm);
+}
+
 int main(int argc, char *argv[])
 {
        unsigned long vm_types, vm_type;
@@ -488,5 +536,6 @@ int main(int argc, char *argv[])
        for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types))
                test_guest_memfd(vm_type);
 
-       test_guest_memfd_guest();
+       test_guest_shared_mem();
+       test_guest_private_mem();
 }
-- 
2.53.0.rc1.225.gd81095ad13-goog


Reply via email to