For x86, override the default implementations of content mode functions to
handle reporting of supported content modes, and application of requested
modes based on x86 VM types.

Signed-off-by: Ackerley Tng <[email protected]>
---
 arch/x86/kvm/x86.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9c29407712580..3bbc8ffbf489e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -14078,6 +14078,107 @@ void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
        kvm_x86_call(gmem_invalidate)(start, end);
 }
 #endif
+
+/*
+ * Report the set of guest_memfd content modes supported for @kvm,
+ * derived from its x86 VM type.  VM types with no content-mode
+ * support report an empty set.
+ */
+u64 kvm_arch_gmem_supported_content_modes(struct kvm *kvm)
+{
+       u64 modes = 0;
+
+       switch (kvm->arch.vm_type) {
+       case KVM_X86_SW_PROTECTED_VM:
+               modes = KVM_SET_MEMORY_ATTRIBUTES2_ZERO |
+                       KVM_SET_MEMORY_ATTRIBUTES2_PRESERVE;
+               break;
+       case KVM_X86_SNP_VM:
+       case KVM_X86_TDX_VM:
+               modes = KVM_SET_MEMORY_ATTRIBUTES2_ZERO;
+               break;
+       }
+
+       return modes;
+}
+
+/*
+ * Apply the ZERO content mode to guest_memfd range [@start, @end) on
+ * behalf of @kvm.  Returns 0 on success, -EOPNOTSUPP for VM types
+ * that do not support zeroing, or an error from the zeroing helper.
+ */
+int kvm_arch_gmem_apply_content_mode_zero(struct kvm *kvm, struct inode *inode,
+                                         pgoff_t start, pgoff_t end)
+{
+       switch (kvm->arch.vm_type) {
+       case KVM_X86_SW_PROTECTED_VM:
+       case KVM_X86_SNP_VM:
+       case KVM_X86_TDX_VM:
+               /*
+                * TDX firmware will zero on unmapping from the
+                * Secure-EPTs, but suppose a shared page with
+                * contents was converted to private, and then
+                * converted back without ever being mapped into
+                * Secure-EPTs: guest_memfd can't rely on TDX firmware
+                * for zeroing then.
+                */
+               return kvm_gmem_apply_content_mode_zero(inode, start, end);
+       default:
+               /* Include the vm_type; printk messages end with '\n'. */
+               WARN_ONCE(1, "Unexpected request to zero for vm_type %lu\n",
+                         kvm->arch.vm_type);
+               return -EOPNOTSUPP;
+       }
+}
+
+/*
+ * Apply the PRESERVE content mode to guest_memfd range [@start, @end)
+ * on behalf of @kvm.  Preserving contents requires no work for
+ * software-protected VMs; all other VM types reject the request.
+ */
+int kvm_arch_gmem_apply_content_mode_preserve(struct kvm *kvm,
+                                             struct inode *inode,
+                                             pgoff_t start, pgoff_t end)
+{
+       switch (kvm->arch.vm_type) {
+       case KVM_X86_SW_PROTECTED_VM:
+               return 0;
+       default:
+               /* Include the vm_type; printk messages end with '\n'. */
+               WARN_ONCE(1, "Unexpected request to preserve for vm_type %lu\n",
+                         kvm->arch.vm_type);
+               return -EOPNOTSUPP;
+       }
+}
+
+/*
+ * Overwrite the contents of folios in [@start, @end) with random bytes.
+ * Returns 0 on success, or -EHWPOISON if a poisoned folio is found
+ * (scrambling stops at the first poisoned folio).
+ */
+static int __scramble_range(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct folio_batch fbatch;
+       struct folio *f;
+       size_t offset;
+       char *kaddr;
+       int ret = 0;
+       int i;
+
+       folio_batch_init(&fbatch);
+       while (!ret && filemap_get_folios(mapping, &start, end - 1, &fbatch)) {
+               for (i = 0; !ret && i < folio_batch_count(&fbatch); ++i) {
+                       f = fbatch.folios[i];
+
+                       folio_lock(f);
+
+                       if (folio_test_hwpoison(f)) {
+                               ret = -EHWPOISON;
+                       } else {
+                               /*
+                                * kmap_local_folio() can only map one page
+                                * of a folio at a time, so walk the folio
+                                * page by page.  Handles large folios too,
+                                * not just the PAGE_SIZE folios guest_memfd
+                                * supports today.
+                                */
+                               for (offset = 0; offset < folio_size(f);
+                                    offset += PAGE_SIZE) {
+                                       kaddr = kmap_local_folio(f, offset);
+                                       get_random_bytes(kaddr, PAGE_SIZE);
+                                       kunmap_local(kaddr);
+                               }
+                       }
+
+                       folio_unlock(f);
+               }
+
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
+
+       return ret;
+}
+
+/*
+ * Handle guest_memfd range [@start, @end) when userspace specified no
+ * content mode: scramble the contents for software-protected VMs,
+ * otherwise leave the range alone.
+ */
+int kvm_arch_gmem_apply_content_mode_unspecified(struct kvm *kvm,
+                                                struct inode *inode,
+                                                pgoff_t start, pgoff_t end)
+{
+       if (kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM)
+               return __scramble_range(inode, start, end);
+
+       return 0;
+}
+
 #endif
 
 int kvm_spec_ctrl_test_value(u64 value)

-- 
2.53.0.1018.g2bb0e51243-goog


Reply via email to