On Wed, Aug 27, 2025 at 10:24:03AM +0800, Binbin Wu wrote:
> 
> 
> On 8/26/2025 9:07 AM, Yan Zhao wrote:
> > On Mon, Aug 25, 2025 at 02:02:00PM -0500, Sagi Shahar wrote:
> > > On Mon, Aug 25, 2025 at 3:41 AM Yan Zhao <yan.y.z...@intel.com> wrote:
> > > > On Wed, Aug 20, 2025 at 09:29:07PM -0700, Sagi Shahar wrote:
> > > > > From: Ackerley Tng <ackerley...@google.com>
> > > > > 
> > > > > TDX protected memory needs to be measured and encrypted before it can be
> > > > > used by the guest. Traverse the VM's memory regions and initialize all
> > > > > the protected ranges by calling KVM_TDX_INIT_MEM_REGION.
> > > > > 
> > > > > Once all the memory is initialized, the VM can be finalized by calling
> > > > > KVM_TDX_FINALIZE_VM.
> > > > > 
> > > > > Signed-off-by: Ackerley Tng <ackerley...@google.com>
> > > > > Co-developed-by: Erdem Aktas <erdemak...@google.com>
> > > > > Signed-off-by: Erdem Aktas <erdemak...@google.com>
> > > > > Co-developed-by: Sagi Shahar <sa...@google.com>
> > > > > Signed-off-by: Sagi Shahar <sa...@google.com>
> > > > > ---
> > > > >   .../selftests/kvm/include/x86/tdx/tdx_util.h  |  2 +
> > > > >   .../selftests/kvm/lib/x86/tdx/tdx_util.c      | 97 +++++++++++++++++++
> > > > >   2 files changed, 99 insertions(+)
> > > > > 
> > > > > diff --git a/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h b/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
> > > > > index a2509959c7ce..2467b6c35557 100644
> > > > > --- a/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
> > > > > +++ b/tools/testing/selftests/kvm/include/x86/tdx/tdx_util.h
> > > > > @@ -71,4 +71,6 @@ void vm_tdx_load_common_boot_parameters(struct kvm_vm *vm);
> > > > >   void vm_tdx_load_vcpu_boot_parameters(struct kvm_vm *vm, struct kvm_vcpu *vcpu);
> > > > >   void vm_tdx_set_vcpu_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
> > > > > 
> > > > > +void vm_tdx_finalize(struct kvm_vm *vm);
> > > > > +
> > > > >   #endif // SELFTESTS_TDX_TDX_UTIL_H
> > > > > diff --git a/tools/testing/selftests/kvm/lib/x86/tdx/tdx_util.c b/tools/testing/selftests/kvm/lib/x86/tdx/tdx_util.c
> > > > > index d8eab99d9333..4024587ed3c2 100644
> > > > > --- a/tools/testing/selftests/kvm/lib/x86/tdx/tdx_util.c
> > > > > +++ b/tools/testing/selftests/kvm/lib/x86/tdx/tdx_util.c
> > > > > @@ -274,3 +274,100 @@ void vm_tdx_init_vm(struct kvm_vm *vm, uint64_t attributes)
> > > > > 
> > > > >        free(init_vm);
> > > > >   }
> > > > > +
> > > > > +static void tdx_init_mem_region(struct kvm_vm *vm, void *source_pages,
> > > > > +                             uint64_t gpa, uint64_t size)
> > > > > +{
> > > > > +     uint32_t metadata = KVM_TDX_MEASURE_MEMORY_REGION;
> > > > > +     struct kvm_tdx_init_mem_region mem_region = {
> > > > > +             .source_addr = (uint64_t)source_pages,
> > > > > +             .gpa = gpa,
> > > > > +             .nr_pages = size / PAGE_SIZE,
> > > > > +     };
> > > > > +     struct kvm_vcpu *vcpu;
> > > > > +
> > > > > +     vcpu = list_first_entry_or_null(&vm->vcpus, struct kvm_vcpu, list);
> > > > > +
> > > > > +     TEST_ASSERT((mem_region.nr_pages > 0) &&
> > > > > +                 ((mem_region.nr_pages * PAGE_SIZE) == size),
> > > > > +                 "Cannot add partial pages to the guest memory.\n");
> > > > > +     TEST_ASSERT(((uint64_t)source_pages & (PAGE_SIZE - 1)) == 0,
> > > > > +                 "Source memory buffer is not page aligned\n");
> > > > > +     vm_tdx_vcpu_ioctl(vcpu, KVM_TDX_INIT_MEM_REGION, metadata, &mem_region);
> > > > > +}
> > > > > +
> > > > > +static void tdx_init_pages(struct kvm_vm *vm, void *hva, uint64_t gpa,
> > > > > +                        uint64_t size)
> > > > > +{
> > > > > +     void *scratch_page = calloc(1, PAGE_SIZE);
> > > > > +     uint64_t nr_pages = size / PAGE_SIZE;
> > > > > +     int i;
> > > > > +
> > > > > +     TEST_ASSERT(scratch_page,
> > > > > +                 "Could not allocate memory for loading memory 
> > > > > region");
> > > > > +
> > > > > +     for (i = 0; i < nr_pages; i++) {
> > > > > +             memcpy(scratch_page, hva, PAGE_SIZE);
> > > > > +
> > > > > +             tdx_init_mem_region(vm, scratch_page, gpa, PAGE_SIZE);
> > > > > +
> > > > > +             hva += PAGE_SIZE;
> > > > > +             gpa += PAGE_SIZE;
> > > > > +     }
> > > > > +
> > > > > +     free(scratch_page);
> > > > > +}
> > > > > +
> > > > > +static void load_td_private_memory(struct kvm_vm *vm)
> > > > > +{
> > > > > +     struct userspace_mem_region *region;
> > > > > +     int ctr;
> > > > > +
> > > > > +     hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
> > > > > +             const struct sparsebit *protected_pages = region->protected_phy_pages;
> > > > > +             const vm_paddr_t gpa_base = region->region.guest_phys_addr;
> > > > > +             const uint64_t hva_base = region->region.userspace_addr;
> > > > > +             const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
> > > > > +
> > > > > +             sparsebit_idx_t i;
> > > > > +             sparsebit_idx_t j;
> > > > > +
> > > > > +             if (!sparsebit_any_set(protected_pages))
> > > > > +                     continue;
> > > > > +
> > > > > +             sparsebit_for_each_set_range(protected_pages, i, j) {
> > > > > +                     const uint64_t size_to_load = (j - i + 1) * vm->page_size;
> > > > > +                     const uint64_t offset =
> > > > > +                             (i - lowest_page_in_region) * vm->page_size;
> > > > > +                     const uint64_t hva = hva_base + offset;
> > > > > +                     const uint64_t gpa = gpa_base + offset;
> > > > > +
> > > > > +                     vm_set_memory_attributes(vm, gpa, size_to_load,
> > > > > +                                              KVM_MEMORY_ATTRIBUTE_PRIVATE);
> > > > > +
> > > > > +                     /*
> > > > > +                      * Here, memory is being loaded from hva to gpa. If the memory
> > > > > +                      * mapped to hva is also used to back gpa, then a copy has to be
> > > > > +                      * made just for loading, since KVM_TDX_INIT_MEM_REGION ioctl
> > > > > +                      * cannot encrypt memory in place.
> > > > > +                      *
> > > > > +                      * To determine if memory mapped to hva is also used to back
> > > > > +                      * gpa, use a heuristic:
> > > > > +                      *
> > > > > +                      * If this memslot has guest_memfd, then this memslot should
> > > > > +                      * have memory backed from two sources: hva for shared memory
> > > > > +                      * and gpa will be backed by guest_memfd.
> > > > > +                      */
> > > > > +                     if (region->region.guest_memfd == -1)
> > > > Why pass a !guest_memfd region to tdx_init_mem_region()?
> > > > 
> > > Not sure I understand your comment.
> > From the implementation, tdx_init_pages() also invokes
> > tdx_init_mem_region(), which in turn invokes the KVM_TDX_INIT_MEM_REGION ioctl.
> > 
> > However, if the region has guest_memfd == -1, the ioctl
> > KVM_TDX_INIT_MEM_REGION should fail, as kvm_gmem_populate() won't succeed.
> > 
> > So, I'm wondering why there's a need to handle the case of
> > "region->region.guest_memfd == -1".
> > 
> > Or anything I missed?
> I had the same question in v8
> https://lore.kernel.org/lkml/4b7e7099-79da-4178-8f16-6780d8137...@linux.intel.com/
I agree with you.
In patch "KVM: selftests: TDX: Test LOG_DIRTY_PAGES flag to a non-GUEST_MEMFD
memslot", virt_map_shared() prevents the non-guest_memfd region from being
searched by the load_td_private_memory().
Otherwise, the tdx_init_mem_region() would fail on this region.

> I guess the code path for non-guest_memfd is due to some old versions of TDX
> KVM code before upstream. Currently, KVM doesn't support private memory from
> non-guest_memfd backed memory.
I guess so. Maybe just drop this case and assert?

TEST_ASSERT(region->region.guest_memfd != -1,
	    "TDX private memory only supports guest_memfd backend\n");

> > 
> > > > > +                             tdx_init_pages(vm, (void *)hva, gpa, size_to_load);
> > > > > +                     else
> > > > > +                             tdx_init_mem_region(vm, (void *)hva, gpa, size_to_load);
> > > > > +             }
> > > > > +     }
> > > > > +}
> > > > > +
> > > > > +void vm_tdx_finalize(struct kvm_vm *vm)
> > > > > +{
> > > > > +     load_td_private_memory(vm);
> > > > > +     vm_tdx_vm_ioctl(vm, KVM_TDX_FINALIZE_VM, 0, NULL);
> > > > > +}
> > > > > --
> > > > > 2.51.0.rc1.193.gad69d77794-goog
> > > > > 
> > > > > 
> 
