Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-17 Thread Chao Peng
On Wed, Nov 16, 2022 at 10:24:11PM +0000, Sean Christopherson wrote:
> On Tue, Oct 25, 2022, Chao Peng wrote:
> > +static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t 
> > size,
> > +bool is_private)
> > +{
> > +   gfn_t start, end;
> > +   unsigned long i;
> > +   void *entry;
> > +   int idx;
> > +   int r = 0;
> > +
> > +   if (size == 0 || gpa + size < gpa)
> > +   return -EINVAL;
> > +   if (gpa & (PAGE_SIZE - 1) || size & (PAGE_SIZE - 1))
> > +   return -EINVAL;
> > +
> > +   start = gpa >> PAGE_SHIFT;
> > +   end = (gpa + size - 1 + PAGE_SIZE) >> PAGE_SHIFT;
> > +
> > +   /*
> > +* Guest memory defaults to private, kvm->mem_attr_array only stores
> > +* shared memory.
> > +*/
> > +   entry = is_private ? NULL : xa_mk_value(KVM_MEM_ATTR_SHARED);
> > +
> > +   idx = srcu_read_lock(&kvm->srcu);
> > +   KVM_MMU_LOCK(kvm);
> > +   kvm_mmu_invalidate_begin(kvm, start, end);
> > +
> > +   for (i = start; i < end; i++) {
> > +   r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
> > +   GFP_KERNEL_ACCOUNT));
> > +   if (r)
> > +   goto err;
> > +   }
> > +
> > +   kvm_unmap_mem_range(kvm, start, end);
> > +
> > +   goto ret;
> > +err:
> > +   for (; i > start; i--)
> > +   xa_erase(&kvm->mem_attr_array, i);
> 
> I don't think deleting previous entries is correct.  To unwind, the correct 
> thing
> to do is restore the original values.  E.g. if userspace space is mapping a 
> large
> range as shared, and some of the previous entries were shared, deleting them 
> would
> incorrectly "convert" those entries to private.

Ah, right!

> 
> Tracking the previous state likely isn't the best approach, e.g. it would 
> require
> speculatively allocating extra memory for a rare condition that is likely 
> going to
> lead to OOM anyways.

Agree.

> 
> Instead of trying to unwind, what about updating the ioctl() params such that
> retrying with the updated addr+size would Just Work?  E.g.

Looks good to me. Thanks!

Chao
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 55b07aae67cc..f1de592a1a06 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1015,15 +1015,12 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, 
> gpa_t gpa, gpa_t size,
>  
> kvm_unmap_mem_range(kvm, start, end, attr);
>  
> -   goto ret;
> -err:
> -   for (; i > start; i--)
> -   xa_erase(&kvm->mem_attr_array, i);
> -ret:
> kvm_mmu_invalidate_end(kvm, start, end);
> KVM_MMU_UNLOCK(kvm);
> srcu_read_unlock(&kvm->srcu, idx);
>  
> +   
> +
> return r;
>  }
>  #endif /* CONFIG_KVM_GENERIC_PRIVATE_MEM */
> @@ -4989,6 +4986,8 @@ static long kvm_vm_ioctl(struct file *filp,
>  
> r = kvm_vm_ioctl_set_mem_attr(kvm, region.addr,
>   region.size, set);
> +   if (copy_to_user(argp, &region, sizeof(region)) && !r)
> +   r = -EFAULT;
> break;
> }
>  #endif



Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-16 Thread Sean Christopherson
On Tue, Oct 25, 2022, Chao Peng wrote:
> +static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
> +  bool is_private)
> +{
> + gfn_t start, end;
> + unsigned long i;
> + void *entry;
> + int idx;
> + int r = 0;
> +
> + if (size == 0 || gpa + size < gpa)
> + return -EINVAL;
> + if (gpa & (PAGE_SIZE - 1) || size & (PAGE_SIZE - 1))
> + return -EINVAL;
> +
> + start = gpa >> PAGE_SHIFT;
> + end = (gpa + size - 1 + PAGE_SIZE) >> PAGE_SHIFT;
> +
> + /*
> +  * Guest memory defaults to private, kvm->mem_attr_array only stores
> +  * shared memory.
> +  */
> + entry = is_private ? NULL : xa_mk_value(KVM_MEM_ATTR_SHARED);
> +
> + idx = srcu_read_lock(&kvm->srcu);
> + KVM_MMU_LOCK(kvm);
> + kvm_mmu_invalidate_begin(kvm, start, end);
> +
> + for (i = start; i < end; i++) {
> + r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
> + GFP_KERNEL_ACCOUNT));
> + if (r)
> + goto err;
> + }
> +
> + kvm_unmap_mem_range(kvm, start, end);
> +
> + goto ret;
> +err:
> + for (; i > start; i--)
> + xa_erase(&kvm->mem_attr_array, i);

I don't think deleting previous entries is correct.  To unwind, the correct 
thing
to do is restore the original values.  E.g. if userspace space is mapping a 
large
range as shared, and some of the previous entries were shared, deleting them 
would
incorrectly "convert" those entries to private.

Tracking the previous state likely isn't the best approach, e.g. it would 
require
speculatively allocating extra memory for a rare condition that is likely going 
to
lead to OOM anyways.

Instead of trying to unwind, what about updating the ioctl() params such that
retrying with the updated addr+size would Just Work?  E.g.

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 55b07aae67cc..f1de592a1a06 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1015,15 +1015,12 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, 
gpa_t gpa, gpa_t size,
 
kvm_unmap_mem_range(kvm, start, end, attr);
 
-   goto ret;
-err:
-   for (; i > start; i--)
-   xa_erase(&kvm->mem_attr_array, i);
-ret:
kvm_mmu_invalidate_end(kvm, start, end);
KVM_MMU_UNLOCK(kvm);
srcu_read_unlock(&kvm->srcu, idx);
 
+   
+
return r;
 }
 #endif /* CONFIG_KVM_GENERIC_PRIVATE_MEM */
@@ -4989,6 +4986,8 @@ static long kvm_vm_ioctl(struct file *filp,
 
r = kvm_vm_ioctl_set_mem_attr(kvm, region.addr,
  region.size, set);
+   if (copy_to_user(argp, &region, sizeof(region)) && !r)
+   r = -EFAULT;
break;
}
 #endif



Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-08 Thread Yuan Yao
On Tue, Nov 08, 2022 at 05:41:41PM +0800, Chao Peng wrote:
> On Tue, Nov 08, 2022 at 09:35:06AM +0800, Yuan Yao wrote:
> > On Tue, Oct 25, 2022 at 11:13:41PM +0800, Chao Peng wrote:
> > > Introduce generic private memory register/unregister by reusing existing
> > > SEV ioctls KVM_MEMORY_ENCRYPT_{UN,}REG_REGION. It differs from SEV case
> > > by treating address in the region as gpa instead of hva. Which cases
> > > should these ioctls go is determined by the kvm_arch_has_private_mem().
> > > Architecture which supports KVM_PRIVATE_MEM should override this function.
> > >
> > > KVM internally defaults all guest memory as private memory and maintain
> > > the shared memory in 'mem_attr_array'. The above ioctls operate on this
> > > field and unmap existing mappings if any.
> > >
> > > Signed-off-by: Chao Peng 
> > > ---
> > >  Documentation/virt/kvm/api.rst |  17 ++-
> > >  arch/x86/kvm/Kconfig   |   1 +
> > >  include/linux/kvm_host.h   |  10 +-
> > >  virt/kvm/Kconfig   |   4 +
> > >  virt/kvm/kvm_main.c| 227 +
> > >  5 files changed, 198 insertions(+), 61 deletions(-)
> > >
> > > diff --git a/Documentation/virt/kvm/api.rst 
> > > b/Documentation/virt/kvm/api.rst
> > > index 975688912b8c..08253cf498d1 100644
> > > --- a/Documentation/virt/kvm/api.rst
> > > +++ b/Documentation/virt/kvm/api.rst
> > > @@ -4717,10 +4717,19 @@ 
> > > Documentation/virt/kvm/x86/amd-memory-encryption.rst.
> > >  This ioctl can be used to register a guest memory region which may
> > >  contain encrypted data (e.g. guest RAM, SMRAM etc).
> > >
> > > -It is used in the SEV-enabled guest. When encryption is enabled, a guest
> > > -memory region may contain encrypted data. The SEV memory encryption
> > > -engine uses a tweak such that two identical plaintext pages, each at
> > > -different locations will have differing ciphertexts. So swapping or
> > > +Currently this ioctl supports registering memory regions for two usages:
> > > +private memory and SEV-encrypted memory.
> > > +
> > > +When private memory is enabled, this ioctl is used to register guest 
> > > private
> > > +memory region and the addr/size of kvm_enc_region represents guest 
> > > physical
> > > +address (GPA). In this usage, this ioctl zaps the existing guest memory
> > > +mappings in KVM that fallen into the region.
> > > +
> > > +When SEV-encrypted memory is enabled, this ioctl is used to register 
> > > guest
> > > +memory region which may contain encrypted data for a SEV-enabled guest. 
> > > The
> > > +addr/size of kvm_enc_region represents userspace address (HVA). The SEV
> > > +memory encryption engine uses a tweak such that two identical plaintext 
> > > pages,
> > > +each at different locations will have differing ciphertexts. So swapping 
> > > or
> > >  moving ciphertext of those pages will not result in plaintext being
> > >  swapped. So relocating (or migrating) physical backing pages for the SEV
> > >  guest will require some additional steps.
> > > diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> > > index 8d2bd455c0cd..73fdfa429b20 100644
> > > --- a/arch/x86/kvm/Kconfig
> > > +++ b/arch/x86/kvm/Kconfig
> > > @@ -51,6 +51,7 @@ config KVM
> > >   select HAVE_KVM_PM_NOTIFIER if PM
> > >   select HAVE_KVM_RESTRICTED_MEM if X86_64
> > >   select RESTRICTEDMEM if HAVE_KVM_RESTRICTED_MEM
> > > + select KVM_GENERIC_PRIVATE_MEM if HAVE_KVM_RESTRICTED_MEM
> > >   help
> > > Support hosting fully virtualized guest machines using hardware
> > > virtualization extensions.  You will need a fairly recent
> > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> > > index 79e5cbc35fcf..4ce98fa0153c 100644
> > > --- a/include/linux/kvm_host.h
> > > +++ b/include/linux/kvm_host.h
> > > @@ -245,7 +245,8 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t 
> > > cr2_or_gpa,
> > >  int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
> > >  #endif
> > >
> > > -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> > > +
> > > +#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || 
> > > defined(CONFIG_KVM_GENERIC_PRIVATE_MEM)
> > >  struct kvm_gfn_range {
> > >   struct kvm_memory_slot *slot;
> > >   gfn_t start;
> > > @@ -254,6 +255,9 @@ struct kvm_gfn_range {
> > >   bool may_block;
> > >  };
> > >  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
> > > +#endif
> > > +
> > > +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> > >  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> > >  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> > >  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> > > @@ -794,6 +798,9 @@ struct kvm {
> > >   struct notifier_block pm_notifier;
> > >  #endif
> > >   char stats_id[KVM_STATS_NAME_SIZE];
> > > +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> > > + struct xarray mem_attr_array;
> > > +#endif
> > >  };
> > >
> > >  #define kvm_err(fmt, ...) \
> > > @@ -1453,6 +1460,7 @@ bool 

Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-08 Thread Chao Peng
On Tue, Nov 08, 2022 at 09:35:06AM +0800, Yuan Yao wrote:
> On Tue, Oct 25, 2022 at 11:13:41PM +0800, Chao Peng wrote:
> > Introduce generic private memory register/unregister by reusing existing
> > SEV ioctls KVM_MEMORY_ENCRYPT_{UN,}REG_REGION. It differs from SEV case
> > by treating address in the region as gpa instead of hva. Which cases
> > should these ioctls go is determined by the kvm_arch_has_private_mem().
> > Architecture which supports KVM_PRIVATE_MEM should override this function.
> >
> > KVM internally defaults all guest memory as private memory and maintain
> > the shared memory in 'mem_attr_array'. The above ioctls operate on this
> > field and unmap existing mappings if any.
> >
> > Signed-off-by: Chao Peng 
> > ---
> >  Documentation/virt/kvm/api.rst |  17 ++-
> >  arch/x86/kvm/Kconfig   |   1 +
> >  include/linux/kvm_host.h   |  10 +-
> >  virt/kvm/Kconfig   |   4 +
> >  virt/kvm/kvm_main.c| 227 +
> >  5 files changed, 198 insertions(+), 61 deletions(-)
> >
> > diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> > index 975688912b8c..08253cf498d1 100644
> > --- a/Documentation/virt/kvm/api.rst
> > +++ b/Documentation/virt/kvm/api.rst
> > @@ -4717,10 +4717,19 @@ 
> > Documentation/virt/kvm/x86/amd-memory-encryption.rst.
> >  This ioctl can be used to register a guest memory region which may
> >  contain encrypted data (e.g. guest RAM, SMRAM etc).
> >
> > -It is used in the SEV-enabled guest. When encryption is enabled, a guest
> > -memory region may contain encrypted data. The SEV memory encryption
> > -engine uses a tweak such that two identical plaintext pages, each at
> > -different locations will have differing ciphertexts. So swapping or
> > +Currently this ioctl supports registering memory regions for two usages:
> > +private memory and SEV-encrypted memory.
> > +
> > +When private memory is enabled, this ioctl is used to register guest 
> > private
> > +memory region and the addr/size of kvm_enc_region represents guest physical
> > +address (GPA). In this usage, this ioctl zaps the existing guest memory
> > +mappings in KVM that fallen into the region.
> > +
> > +When SEV-encrypted memory is enabled, this ioctl is used to register guest
> > +memory region which may contain encrypted data for a SEV-enabled guest. The
> > +addr/size of kvm_enc_region represents userspace address (HVA). The SEV
> > +memory encryption engine uses a tweak such that two identical plaintext 
> > pages,
> > +each at different locations will have differing ciphertexts. So swapping or
> >  moving ciphertext of those pages will not result in plaintext being
> >  swapped. So relocating (or migrating) physical backing pages for the SEV
> >  guest will require some additional steps.
> > diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> > index 8d2bd455c0cd..73fdfa429b20 100644
> > --- a/arch/x86/kvm/Kconfig
> > +++ b/arch/x86/kvm/Kconfig
> > @@ -51,6 +51,7 @@ config KVM
> > select HAVE_KVM_PM_NOTIFIER if PM
> > select HAVE_KVM_RESTRICTED_MEM if X86_64
> > select RESTRICTEDMEM if HAVE_KVM_RESTRICTED_MEM
> > +   select KVM_GENERIC_PRIVATE_MEM if HAVE_KVM_RESTRICTED_MEM
> > help
> >   Support hosting fully virtualized guest machines using hardware
> >   virtualization extensions.  You will need a fairly recent
> > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> > index 79e5cbc35fcf..4ce98fa0153c 100644
> > --- a/include/linux/kvm_host.h
> > +++ b/include/linux/kvm_host.h
> > @@ -245,7 +245,8 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t 
> > cr2_or_gpa,
> >  int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
> >  #endif
> >
> > -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> > +
> > +#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || 
> > defined(CONFIG_KVM_GENERIC_PRIVATE_MEM)
> >  struct kvm_gfn_range {
> > struct kvm_memory_slot *slot;
> > gfn_t start;
> > @@ -254,6 +255,9 @@ struct kvm_gfn_range {
> > bool may_block;
> >  };
> >  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
> > +#endif
> > +
> > +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> >  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> >  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> >  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> > @@ -794,6 +798,9 @@ struct kvm {
> > struct notifier_block pm_notifier;
> >  #endif
> > char stats_id[KVM_STATS_NAME_SIZE];
> > +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> > +   struct xarray mem_attr_array;
> > +#endif
> >  };
> >
> >  #define kvm_err(fmt, ...) \
> > @@ -1453,6 +1460,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct 
> > kvm_vcpu *vcpu);
> >  int kvm_arch_post_init_vm(struct kvm *kvm);
> >  void kvm_arch_pre_destroy_vm(struct kvm *kvm);
> >  int kvm_arch_create_vm_debugfs(struct kvm *kvm);
> > +bool kvm_arch_has_private_mem(struct kvm *kvm);
> >
> >  #ifndef 

Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-08 Thread Chao Peng
On Fri, Nov 04, 2022 at 09:19:31PM +0000, Sean Christopherson wrote:
> Paolo, any thoughts before I lead things further astray?
> 
> On Fri, Nov 04, 2022, Chao Peng wrote:
> > On Thu, Nov 03, 2022 at 11:04:53PM +0000, Sean Christopherson wrote:
> > > On Tue, Oct 25, 2022, Chao Peng wrote:
> > > > @@ -4708,6 +4802,24 @@ static long kvm_vm_ioctl(struct file *filp,
> > > > r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
> > > > break;
> > > > }
> > > > +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> > > > +   case KVM_MEMORY_ENCRYPT_REG_REGION:
> > > > +   case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
> > > 
> > > I'm having second thoughts about usurping 
> > > KVM_MEMORY_ENCRYPT_(UN)REG_REGION.  Aside
> > > from the fact that restricted/protected memory may not be encrypted, 
> > > there are
> > > other potential use cases for per-page memory attributes[*], e.g. to make 
> > > memory
> > > read-only (or no-exec, or exec-only, etc...) without having to modify 
> > > memslots.
> > > 
> > > Any paravirt use case where the attributes of a page are effectively 
> > > dictated by
> > > the guest is going to run into the exact same performance problems with 
> > > memslots,
> > > which isn't suprising in hindsight since shared vs. private is really 
> > > just an
> > > attribute, albeit with extra special semantics.
> > > 
> > > And if we go with a brand new ioctl(), maybe someday in the very distant 
> > > future
> > > we can deprecate and delete KVM_MEMORY_ENCRYPT_(UN)REG_REGION.
> > > 
> > > Switching to a new ioctl() should be a minor change, i.e. shouldn't throw 
> > > too big
> > > of a wrench into things.
> > > 
> > > Something like:
> > > 
> > >   KVM_SET_MEMORY_ATTRIBUTES
> > > 
> > >   struct kvm_memory_attributes {
> > >   __u64 address;
> > >   __u64 size;
> > >   __u64 flags;
> 
> Oh, this is half-baked.  I lost track of which flags were which.  What I 
> intended
> was a separate, initially-unused flags, e.g.

That makes sense.

> 
>  struct kvm_memory_attributes {
>   __u64 address;
>   __u64 size;
>   __u64 attributes;
>   __u64 flags;
>   }
> 
> so that KVM can tweak behavior and/or extend the effective size of the struct.
> 
> > I like the idea of adding a new ioctl(). But putting all attributes into
> > a flags in uAPI sounds not good to me, e.g. forcing userspace to set all
> > attributes in one call can cause pain for userspace, probably for KVM
> > implementation as well. For private<->shared memory conversion, we
> > actually only care the KVM_MEM_ATTR_SHARED or KVM_MEM_ATTR_PRIVATE bit,
> 
> Not necessarily, e.g. I can see pKVM wanting to convert from RW+PRIVATE => 
> RO+SHARED
> or even RW+PRIVATE => NONE+SHARED so that the guest can't write/access the 
> memory
> while it's accessible from the host.
> 
> And if this does extend beyond shared/private, dropping from RWX=>R, i.e. 
> dropping
> WX permissions, would also be a common operation.
> 
> Hmm, typing that out makes me think that if we do end up supporting other 
> "attributes",
> i.e. protections, we should go straight to full RWX protections instead of 
> doing
> things piecemeal, i.e. add individual protections instead of combinations like
> NO_EXEC and READ_ONLY.  The protections would have to be inverted for 
> backwards
> compatibility, but that's easy enough to handle.  The semantics could be like
> protection keys, which also have inverted persmissions, where the final 
> protections
> are the combination of memslot+attributes, i.e. a read-only memslot couldn't 
> be made
> writable via attributes.
> 
> E.g. userspace could do "NO_READ | NO_WRITE | NO_EXEC" to temporarily block 
> access
> to memory without needing to delete the memslot.  KVM would need to disallow
> unsupported combinations, e.g. disallowed effective protections would be:
> 
>   - W or WX [unless there's an arch that supports write-only memory]
>   - R or RW [until KVM plumbs through support for no-exec, or it's 
> unsupported in hardware]
>   - X   [until KVM plumbs through support for exec-only, or it's 
> unsupported in hardware]
> 
> Anyways, that's all future work...
> 
> > but we force userspace to set other irrelevant bits as well if use this
> > API.
> 
> They aren't irrelevant though, as the memory attributes are all describing the
> allowed protections for a given page.

The 'allowed' protections seems answer my concern. But after we enabled
"NO_READ | NO_WRITE | NO_EXEC", are we going to check "NO_READ |
NO_WRITE | NO_EXEC" are also set together with the PRIVATE bit? I just
can't imagine what the semantic would be if we have the PRIVATE bit set
but other bits indicate it's actually can READ/WRITE/EXEC from usrspace.

> If there's a use case where userspace "can't"
> keep track of the attributes for whatever reason, then userspace could do a 
> RMW
> to set/clear attributes.  Alternatively, the ioctl() could take an 
> "operation" and
> support WRITE/OR/AND to allow setting/clearing individual 

Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-07 Thread Yuan Yao
On Tue, Oct 25, 2022 at 11:13:41PM +0800, Chao Peng wrote:
> Introduce generic private memory register/unregister by reusing existing
> SEV ioctls KVM_MEMORY_ENCRYPT_{UN,}REG_REGION. It differs from SEV case
> by treating address in the region as gpa instead of hva. Which cases
> should these ioctls go is determined by the kvm_arch_has_private_mem().
> Architecture which supports KVM_PRIVATE_MEM should override this function.
>
> KVM internally defaults all guest memory as private memory and maintain
> the shared memory in 'mem_attr_array'. The above ioctls operate on this
> field and unmap existing mappings if any.
>
> Signed-off-by: Chao Peng 
> ---
>  Documentation/virt/kvm/api.rst |  17 ++-
>  arch/x86/kvm/Kconfig   |   1 +
>  include/linux/kvm_host.h   |  10 +-
>  virt/kvm/Kconfig   |   4 +
>  virt/kvm/kvm_main.c| 227 +
>  5 files changed, 198 insertions(+), 61 deletions(-)
>
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index 975688912b8c..08253cf498d1 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -4717,10 +4717,19 @@ Documentation/virt/kvm/x86/amd-memory-encryption.rst.
>  This ioctl can be used to register a guest memory region which may
>  contain encrypted data (e.g. guest RAM, SMRAM etc).
>
> -It is used in the SEV-enabled guest. When encryption is enabled, a guest
> -memory region may contain encrypted data. The SEV memory encryption
> -engine uses a tweak such that two identical plaintext pages, each at
> -different locations will have differing ciphertexts. So swapping or
> +Currently this ioctl supports registering memory regions for two usages:
> +private memory and SEV-encrypted memory.
> +
> +When private memory is enabled, this ioctl is used to register guest private
> +memory region and the addr/size of kvm_enc_region represents guest physical
> +address (GPA). In this usage, this ioctl zaps the existing guest memory
> +mappings in KVM that fallen into the region.
> +
> +When SEV-encrypted memory is enabled, this ioctl is used to register guest
> +memory region which may contain encrypted data for a SEV-enabled guest. The
> +addr/size of kvm_enc_region represents userspace address (HVA). The SEV
> +memory encryption engine uses a tweak such that two identical plaintext 
> pages,
> +each at different locations will have differing ciphertexts. So swapping or
>  moving ciphertext of those pages will not result in plaintext being
>  swapped. So relocating (or migrating) physical backing pages for the SEV
>  guest will require some additional steps.
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 8d2bd455c0cd..73fdfa429b20 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -51,6 +51,7 @@ config KVM
>   select HAVE_KVM_PM_NOTIFIER if PM
>   select HAVE_KVM_RESTRICTED_MEM if X86_64
>   select RESTRICTEDMEM if HAVE_KVM_RESTRICTED_MEM
> + select KVM_GENERIC_PRIVATE_MEM if HAVE_KVM_RESTRICTED_MEM
>   help
> Support hosting fully virtualized guest machines using hardware
> virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 79e5cbc35fcf..4ce98fa0153c 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -245,7 +245,8 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t 
> cr2_or_gpa,
>  int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
>  #endif
>
> -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> +
> +#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || 
> defined(CONFIG_KVM_GENERIC_PRIVATE_MEM)
>  struct kvm_gfn_range {
>   struct kvm_memory_slot *slot;
>   gfn_t start;
> @@ -254,6 +255,9 @@ struct kvm_gfn_range {
>   bool may_block;
>  };
>  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
> +#endif
> +
> +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
>  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
>  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> @@ -794,6 +798,9 @@ struct kvm {
>   struct notifier_block pm_notifier;
>  #endif
>   char stats_id[KVM_STATS_NAME_SIZE];
> +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> + struct xarray mem_attr_array;
> +#endif
>  };
>
>  #define kvm_err(fmt, ...) \
> @@ -1453,6 +1460,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu 
> *vcpu);
>  int kvm_arch_post_init_vm(struct kvm *kvm);
>  void kvm_arch_pre_destroy_vm(struct kvm *kvm);
>  int kvm_arch_create_vm_debugfs(struct kvm *kvm);
> +bool kvm_arch_has_private_mem(struct kvm *kvm);
>
>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
>  /*
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index 9ff164c7e0cc..69ca59e82149 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -89,3 +89,7 @@ config HAVE_KVM_PM_NOTIFIER
>
>  config HAVE_KVM_RESTRICTED_MEM
>

Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-04 Thread Sean Christopherson
Paolo, any thoughts before I lead things further astray?

On Fri, Nov 04, 2022, Chao Peng wrote:
> On Thu, Nov 03, 2022 at 11:04:53PM +0000, Sean Christopherson wrote:
> > On Tue, Oct 25, 2022, Chao Peng wrote:
> > > @@ -4708,6 +4802,24 @@ static long kvm_vm_ioctl(struct file *filp,
> > >   r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
> > >   break;
> > >   }
> > > +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> > > + case KVM_MEMORY_ENCRYPT_REG_REGION:
> > > + case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
> > 
> > I'm having second thoughts about usurping 
> > KVM_MEMORY_ENCRYPT_(UN)REG_REGION.  Aside
> > from the fact that restricted/protected memory may not be encrypted, there 
> > are
> > other potential use cases for per-page memory attributes[*], e.g. to make 
> > memory
> > read-only (or no-exec, or exec-only, etc...) without having to modify 
> > memslots.
> > 
> > Any paravirt use case where the attributes of a page are effectively 
> > dictated by
> > the guest is going to run into the exact same performance problems with 
> > memslots,
> > which isn't suprising in hindsight since shared vs. private is really just 
> > an
> > attribute, albeit with extra special semantics.
> > 
> > And if we go with a brand new ioctl(), maybe someday in the very distant 
> > future
> > we can deprecate and delete KVM_MEMORY_ENCRYPT_(UN)REG_REGION.
> > 
> > Switching to a new ioctl() should be a minor change, i.e. shouldn't throw 
> > too big
> > of a wrench into things.
> > 
> > Something like:
> > 
> >   KVM_SET_MEMORY_ATTRIBUTES
> > 
> >   struct kvm_memory_attributes {
> > __u64 address;
> > __u64 size;
> > __u64 flags;

Oh, this is half-baked.  I lost track of which flags were which.  What I 
intended
was a separate, initially-unused flags, e.g.

 struct kvm_memory_attributes {
__u64 address;
__u64 size;
__u64 attributes;
__u64 flags;
  }

so that KVM can tweak behavior and/or extend the effective size of the struct.

> I like the idea of adding a new ioctl(). But putting all attributes into
> a flags in uAPI sounds not good to me, e.g. forcing userspace to set all
> attributes in one call can cause pain for userspace, probably for KVM
> implementation as well. For private<->shared memory conversion, we
> actually only care the KVM_MEM_ATTR_SHARED or KVM_MEM_ATTR_PRIVATE bit,

Not necessarily, e.g. I can see pKVM wanting to convert from RW+PRIVATE => 
RO+SHARED
or even RW+PRIVATE => NONE+SHARED so that the guest can't write/access the 
memory
while it's accessible from the host.

And if this does extend beyond shared/private, dropping from RWX=>R, i.e. 
dropping
WX permissions, would also be a common operation.

Hmm, typing that out makes me think that if we do end up supporting other 
"attributes",
i.e. protections, we should go straight to full RWX protections instead of doing
things piecemeal, i.e. add individual protections instead of combinations like
NO_EXEC and READ_ONLY.  The protections would have to be inverted for backwards
compatibility, but that's easy enough to handle.  The semantics could be like
protection keys, which also have inverted persmissions, where the final 
protections
are the combination of memslot+attributes, i.e. a read-only memslot couldn't be 
made
writable via attributes.

E.g. userspace could do "NO_READ | NO_WRITE | NO_EXEC" to temporarily block 
access
to memory without needing to delete the memslot.  KVM would need to disallow
unsupported combinations, e.g. disallowed effective protections would be:

  - W or WX [unless there's an arch that supports write-only memory]
  - R or RW [until KVM plumbs through support for no-exec, or it's unsupported 
in hardware]
  - X   [until KVM plumbs through support for exec-only, or it's 
unsupported in hardware]

Anyways, that's all future work...

> but we force userspace to set other irrelevant bits as well if use this
> API.

They aren't irrelevant though, as the memory attributes are all describing the
allowed protections for a given page.  If there's a use case where userspace 
"can't"
keep track of the attributes for whatever reason, then userspace could do a RMW
to set/clear attributes.  Alternatively, the ioctl() could take an "operation" 
and
support WRITE/OR/AND to allow setting/clearing individual flags, e.g. tweak the
above to be: 
 
 struct kvm_memory_attributes {
__u64 address;
__u64 size;
__u64 attributes;
__u32 operation;
__u32 flags;
  }

> I looked at kvm_device_attr, sounds we can do similar:

The device attributes deal with isolated, arbitrary values, whereas memory 
attributes
are flags, i.e. devices are 1:1 whereas memory is 1:MANY.  There is no "unset" 
for
device attributes, because they aren't flags.  Device attributes vs. memory 
attributes
really are two very different things that just happen to use a common name.

If it helped clarify things without creating naming problems, we could even use
PROTECTIONS instead of 

Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-04 Thread Chao Peng
On Thu, Nov 03, 2022 at 11:04:53PM +0000, Sean Christopherson wrote:
> On Tue, Oct 25, 2022, Chao Peng wrote:
> > @@ -4708,6 +4802,24 @@ static long kvm_vm_ioctl(struct file *filp,
> > r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
> > break;
> > }
> > +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> > +   case KVM_MEMORY_ENCRYPT_REG_REGION:
> > +   case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
> 
> I'm having second thoughts about usurping KVM_MEMORY_ENCRYPT_(UN)REG_REGION.  
> Aside
> from the fact that restricted/protected memory may not be encrypted, there are
> other potential use cases for per-page memory attributes[*], e.g. to make 
> memory
> read-only (or no-exec, or exec-only, etc...) without having to modify 
> memslots.
> 
> Any paravirt use case where the attributes of a page are effectively dictated 
> by
> the guest is going to run into the exact same performance problems with 
> memslots,
> which isn't suprising in hindsight since shared vs. private is really just an
> attribute, albeit with extra special semantics.
> 
> And if we go with a brand new ioctl(), maybe someday in the very distant 
> future
> we can deprecate and delete KVM_MEMORY_ENCRYPT_(UN)REG_REGION.
> 
> Switching to a new ioctl() should be a minor change, i.e. shouldn't throw too 
> big
> of a wrench into things.
> 
> Something like:
> 
>   KVM_SET_MEMORY_ATTRIBUTES
> 
>   struct kvm_memory_attributes {
>   __u64 address;
>   __u64 size;
>   __u64 flags;
>   }

I like the idea of adding a new ioctl(). But putting all attributes into
a flags in uAPI sounds not good to me, e.g. forcing userspace to set all
attributes in one call can cause pain for userspace, probably for KVM
implementation as well. For private<->shared memory conversion, we
actually only care the KVM_MEM_ATTR_SHARED or KVM_MEM_ATTR_PRIVATE bit,
but we force userspace to set other irrelevant bits as well if use this
API.

I looked at kvm_device_attr, sounds we can do similar:

  KVM_SET_MEMORY_ATTR

  struct kvm_memory_attr {
__u64 address;
__u64 size;
#define KVM_MEM_ATTR_SHARED BIT(0)
#define KVM_MEM_ATTR_READONLY   BIT(1)
#define KVM_MEM_ATTR_NOEXEC BIT(2)
__u32 attr;
__u32 pad;
  }

I'm not sure if we need KVM_GET_MEMORY_ATTR/KVM_HAS_MEMORY_ATTR as well,
but it sounds like we need a KVM_UNSET_MEMORY_ATTR.

Since we are exposing the attribute directly to userspace, I also think
we had better treat shared memory as the default, so that even when
private memory is not used the bit can still be meaningful. So define
BIT(0) as KVM_MEM_ATTR_PRIVATE instead of KVM_MEM_ATTR_SHARED.

Thanks,
Chao

> 
> [*] https://lore.kernel.org/all/y1a1i9vbj%2fpvm...@google.com
> 
> > +   struct kvm_enc_region region;
> > +   bool set = ioctl == KVM_MEMORY_ENCRYPT_REG_REGION;
> > +
> > +   if (!kvm_arch_has_private_mem(kvm))
> > +   goto arch_vm_ioctl;
> > +
> > +   r = -EFAULT;
> > +   if (copy_from_user(&region, argp, sizeof(region)))
> > +   goto out;
> > +
> > +   r = kvm_vm_ioctl_set_mem_attr(kvm, region.addr,
> > + region.size, set);
> > +   break;
> > +   }
> > +#endif



Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-11-03 Thread Sean Christopherson
On Tue, Oct 25, 2022, Chao Peng wrote:
> @@ -4708,6 +4802,24 @@ static long kvm_vm_ioctl(struct file *filp,
>   r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
>   break;
>   }
> +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> + case KVM_MEMORY_ENCRYPT_REG_REGION:
> + case KVM_MEMORY_ENCRYPT_UNREG_REGION: {

I'm having second thoughts about usurping KVM_MEMORY_ENCRYPT_(UN)REG_REGION.  
Aside
from the fact that restricted/protected memory may not be encrypted, there are
other potential use cases for per-page memory attributes[*], e.g. to make memory
read-only (or no-exec, or exec-only, etc...) without having to modify memslots.

Any paravirt use case where the attributes of a page are effectively dictated by
the guest is going to run into the exact same performance problems with 
memslots,
which isn't surprising in hindsight since shared vs. private is really just an
attribute, albeit with extra special semantics.

And if we go with a brand new ioctl(), maybe someday in the very distant future
we can deprecate and delete KVM_MEMORY_ENCRYPT_(UN)REG_REGION.

Switching to a new ioctl() should be a minor change, i.e. shouldn't throw too 
big
of a wrench into things.

Something like:

  KVM_SET_MEMORY_ATTRIBUTES

  struct kvm_memory_attributes {
__u64 address;
__u64 size;
__u64 flags;
  }

[*] https://lore.kernel.org/all/y1a1i9vbj%2fpvm...@google.com

> + struct kvm_enc_region region;
> + bool set = ioctl == KVM_MEMORY_ENCRYPT_REG_REGION;
> +
> + if (!kvm_arch_has_private_mem(kvm))
> + goto arch_vm_ioctl;
> +
> + r = -EFAULT;
> + if (copy_from_user(&region, argp, sizeof(region)))
> + goto out;
> +
> + r = kvm_vm_ioctl_set_mem_attr(kvm, region.addr,
> +   region.size, set);
> + break;
> + }
> +#endif



Re: [PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-10-27 Thread Fuad Tabba
Hi,

On Tue, Oct 25, 2022 at 4:19 PM Chao Peng  wrote:
>
> Introduce generic private memory register/unregister by reusing existing
> SEV ioctls KVM_MEMORY_ENCRYPT_{UN,}REG_REGION. It differs from SEV case
> by treating the address in the region as a gpa instead of an hva. Which
> case these ioctls fall into is determined by kvm_arch_has_private_mem().
> An architecture which supports KVM_PRIVATE_MEM should override this function.
>
> KVM internally defaults all guest memory to private memory and maintains
> the shared memory in 'mem_attr_array'. The above ioctls operate on this
> field and unmap existing mappings, if any.
>
> Signed-off-by: Chao Peng 
> ---

Reviewed-by: Fuad Tabba 

Cheers,
/fuad


>  Documentation/virt/kvm/api.rst |  17 ++-
>  arch/x86/kvm/Kconfig   |   1 +
>  include/linux/kvm_host.h   |  10 +-
>  virt/kvm/Kconfig   |   4 +
>  virt/kvm/kvm_main.c| 227 +
>  5 files changed, 198 insertions(+), 61 deletions(-)
>
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index 975688912b8c..08253cf498d1 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -4717,10 +4717,19 @@ Documentation/virt/kvm/x86/amd-memory-encryption.rst.
>  This ioctl can be used to register a guest memory region which may
>  contain encrypted data (e.g. guest RAM, SMRAM etc).
>
> -It is used in the SEV-enabled guest. When encryption is enabled, a guest
> -memory region may contain encrypted data. The SEV memory encryption
> -engine uses a tweak such that two identical plaintext pages, each at
> -different locations will have differing ciphertexts. So swapping or
> +Currently this ioctl supports registering memory regions for two usages:
> +private memory and SEV-encrypted memory.
> +
> +When private memory is enabled, this ioctl is used to register guest private
> +memory region and the addr/size of kvm_enc_region represents guest physical
> +address (GPA). In this usage, this ioctl zaps the existing guest memory
> mappings in KVM that fall into the region.
> +
> +When SEV-encrypted memory is enabled, this ioctl is used to register guest
> +memory region which may contain encrypted data for a SEV-enabled guest. The
> +addr/size of kvm_enc_region represents userspace address (HVA). The SEV
> +memory encryption engine uses a tweak such that two identical plaintext 
> pages,
> +each at different locations will have differing ciphertexts. So swapping or
>  moving ciphertext of those pages will not result in plaintext being
>  swapped. So relocating (or migrating) physical backing pages for the SEV
>  guest will require some additional steps.
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 8d2bd455c0cd..73fdfa429b20 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -51,6 +51,7 @@ config KVM
> select HAVE_KVM_PM_NOTIFIER if PM
> select HAVE_KVM_RESTRICTED_MEM if X86_64
> select RESTRICTEDMEM if HAVE_KVM_RESTRICTED_MEM
> +   select KVM_GENERIC_PRIVATE_MEM if HAVE_KVM_RESTRICTED_MEM
> help
>   Support hosting fully virtualized guest machines using hardware
>   virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 79e5cbc35fcf..4ce98fa0153c 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -245,7 +245,8 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t 
> cr2_or_gpa,
>  int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
>  #endif
>
> -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> +
> +#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || 
> defined(CONFIG_KVM_GENERIC_PRIVATE_MEM)
>  struct kvm_gfn_range {
> struct kvm_memory_slot *slot;
> gfn_t start;
> @@ -254,6 +255,9 @@ struct kvm_gfn_range {
> bool may_block;
>  };
>  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
> +#endif
> +
> +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
>  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
>  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> @@ -794,6 +798,9 @@ struct kvm {
> struct notifier_block pm_notifier;
>  #endif
> char stats_id[KVM_STATS_NAME_SIZE];
> +#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
> +   struct xarray mem_attr_array;
> +#endif
>  };
>
>  #define kvm_err(fmt, ...) \
> @@ -1453,6 +1460,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu 
> *vcpu);
>  int kvm_arch_post_init_vm(struct kvm *kvm);
>  void kvm_arch_pre_destroy_vm(struct kvm *kvm);
>  int kvm_arch_create_vm_debugfs(struct kvm *kvm);
> +bool kvm_arch_has_private_mem(struct kvm *kvm);
>
>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
>  /*
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index 9ff164c7e0cc..69ca59e82149 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -89,3 +89,7 @@ 

[PATCH v9 5/8] KVM: Register/unregister the guest private memory regions

2022-10-25 Thread Chao Peng
Introduce generic private memory register/unregister by reusing existing
SEV ioctls KVM_MEMORY_ENCRYPT_{UN,}REG_REGION. It differs from SEV case
by treating the address in the region as a gpa instead of an hva. Which
case these ioctls fall into is determined by kvm_arch_has_private_mem().
An architecture which supports KVM_PRIVATE_MEM should override this function.

KVM internally defaults all guest memory to private memory and maintains
the shared memory in 'mem_attr_array'. The above ioctls operate on this
field and unmap existing mappings, if any.

Signed-off-by: Chao Peng 
---
 Documentation/virt/kvm/api.rst |  17 ++-
 arch/x86/kvm/Kconfig   |   1 +
 include/linux/kvm_host.h   |  10 +-
 virt/kvm/Kconfig   |   4 +
 virt/kvm/kvm_main.c| 227 +
 5 files changed, 198 insertions(+), 61 deletions(-)

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 975688912b8c..08253cf498d1 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4717,10 +4717,19 @@ Documentation/virt/kvm/x86/amd-memory-encryption.rst.
 This ioctl can be used to register a guest memory region which may
 contain encrypted data (e.g. guest RAM, SMRAM etc).
 
-It is used in the SEV-enabled guest. When encryption is enabled, a guest
-memory region may contain encrypted data. The SEV memory encryption
-engine uses a tweak such that two identical plaintext pages, each at
-different locations will have differing ciphertexts. So swapping or
+Currently this ioctl supports registering memory regions for two usages:
+private memory and SEV-encrypted memory.
+
+When private memory is enabled, this ioctl is used to register guest private
+memory region and the addr/size of kvm_enc_region represents guest physical
+address (GPA). In this usage, this ioctl zaps the existing guest memory
mappings in KVM that fall into the region.
+
+When SEV-encrypted memory is enabled, this ioctl is used to register guest
+memory region which may contain encrypted data for a SEV-enabled guest. The
+addr/size of kvm_enc_region represents userspace address (HVA). The SEV
+memory encryption engine uses a tweak such that two identical plaintext pages,
+each at different locations will have differing ciphertexts. So swapping or
 moving ciphertext of those pages will not result in plaintext being
 swapped. So relocating (or migrating) physical backing pages for the SEV
 guest will require some additional steps.
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d2bd455c0cd..73fdfa429b20 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -51,6 +51,7 @@ config KVM
select HAVE_KVM_PM_NOTIFIER if PM
select HAVE_KVM_RESTRICTED_MEM if X86_64
select RESTRICTEDMEM if HAVE_KVM_RESTRICTED_MEM
+   select KVM_GENERIC_PRIVATE_MEM if HAVE_KVM_RESTRICTED_MEM
help
  Support hosting fully virtualized guest machines using hardware
  virtualization extensions.  You will need a fairly recent
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 79e5cbc35fcf..4ce98fa0153c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -245,7 +245,8 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t 
cr2_or_gpa,
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+
+#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || 
defined(CONFIG_KVM_GENERIC_PRIVATE_MEM)
 struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
@@ -254,6 +255,9 @@ struct kvm_gfn_range {
bool may_block;
 };
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+#endif
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -794,6 +798,9 @@ struct kvm {
struct notifier_block pm_notifier;
 #endif
char stats_id[KVM_STATS_NAME_SIZE];
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
+   struct xarray mem_attr_array;
+#endif
 };
 
 #define kvm_err(fmt, ...) \
@@ -1453,6 +1460,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu 
*vcpu);
 int kvm_arch_post_init_vm(struct kvm *kvm);
 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
 int kvm_arch_create_vm_debugfs(struct kvm *kvm);
+bool kvm_arch_has_private_mem(struct kvm *kvm);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 9ff164c7e0cc..69ca59e82149 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -89,3 +89,7 @@ config HAVE_KVM_PM_NOTIFIER
 
 config HAVE_KVM_RESTRICTED_MEM
bool
+
+config KVM_GENERIC_PRIVATE_MEM
+   bool
+   depends on HAVE_KVM_RESTRICTED_MEM
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 09c9cdeb773c..fc3835826ace 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c