Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2023-01-13 Thread Sean Christopherson
On Fri, Dec 02, 2022, Chao Peng wrote:
> @@ -5599,6 +5652,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
>   return -EIO;
>   }
>  
> + if (r == RET_PF_USER)
> + return 0;
> +
>   if (r < 0)
>   return r;
>   if (r != RET_PF_EMULATE)
> @@ -6452,7 +6508,8 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
>*/
>   if (sp->role.direct &&
>   sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
> -PG_LEVEL_NUM)) {
> +PG_LEVEL_NUM,
> +false)) {

Passing %false is incorrect.  It might not cause problems because KVM
currently doesn't allow modifying private memslots (that likely needs to
change to allow dirty logging), but it's wrong since nothing guarantees KVM
is operating on SPTEs for shared memory.

One option would be to take the patches from the TDX series that add a "private"
flag to the shadow page role, but I'd rather not add the role until it's truly
necessary.

For now, I think we can do this without impacting performance of guests that
don't support private memory.

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level)
{
	bool is_private = kvm_slot_can_be_private(slot) &&
			  kvm_mem_is_private(kvm, gfn);

	return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
}
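
For context, a sketch of what that wrapper implies for the existing code: the
current body becomes a double-underscore helper that takes is_private
explicitly (the shape below is extrapolated from the suggestion, not code
taken from this series):

static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       gfn_t gfn, int max_level,
				       bool is_private)
{
	/* ... the existing lpage_info/disallow_lpage walk stays as-is ... */

	if (is_private)
		return max_level;

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	/* Shared memory only: narrow the level based on the host mapping. */
	return min(host_pfn_mapping_level(kvm, gfn, slot), max_level);
}

The fault path, which already knows fault->is_private, could then call the
inner helper directly, while generic callers like the zap path keep using the
outer wrapper and get the right answer for private gfns.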

> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 25099c94e770..153842bb33df 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -2335,4 +2335,34 @@ static inline void kvm_arch_set_memory_attributes(struct kvm *kvm,
>  }
>  #endif /* __KVM_HAVE_ARCH_SET_MEMORY_ATTRIBUTES */
>  
> +#ifdef CONFIG_HAVE_KVM_MEMORY_ATTRIBUTES
> +static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
> +{

This code, i.e. the generic KVM changes, belongs in a separate patch.  It'll
be small, but I want to separate x86's page fault changes from the
restrictedmem support being added to common KVM.

This should also short-circuit based on CONFIG_HAVE_KVM_RESTRICTED_MEM, though
I would name that CONFIG_KVM_PRIVATE_MEMORY since in KVM's world, it's all
about private vs. shared at this time.

> + return xa_to_value(xa_load(&kvm->mem_attr_array, gfn)) &
> +        KVM_MEMORY_ATTRIBUTE_PRIVATE;
> +}
> +#else
> +static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
> +{
> + return false;
> +}
> +
> +#endif /* CONFIG_HAVE_KVM_MEMORY_ATTRIBUTES */
> +
> +#ifdef CONFIG_HAVE_KVM_RESTRICTED_MEM
> +static inline int kvm_restricted_mem_get_pfn(struct kvm_memory_slot *slot,
> + gfn_t gfn, kvm_pfn_t *pfn, int *order)
> +{
> + int ret;
> + struct page *page;
> + pgoff_t index = gfn - slot->base_gfn +
> + (slot->restricted_offset >> PAGE_SHIFT);
> +
> + ret = restrictedmem_get_page(slot->restricted_file, index,
> +  &page, order);

This needs to handle errors.  If "ret" is non-zero, "page" is garbage.
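
E.g. something along these lines (untested sketch):

	ret = restrictedmem_get_page(slot->restricted_file, index,
				     &page, order);
	if (ret)
		return ret;

	*pfn = page_to_pfn(page);
	return 0;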

> + *pfn = page_to_pfn(page);
> + return ret;
> +}
> +#endif /* CONFIG_HAVE_KVM_RESTRICTED_MEM */
> +
>  #endif
> -- 
> 2.25.1
> 



Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-11 Thread Chao Peng
On Fri, Dec 09, 2022 at 09:01:04AM +, Fuad Tabba wrote:
> Hi,
> 
> On Fri, Dec 2, 2022 at 6:19 AM Chao Peng  wrote:
> >
> > A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
> > hva-based shared memory. Architecture code (like TDX code) can tell
> > whether the on-going fault is private or not. This patch adds a
> > 'is_private' field to kvm_page_fault to indicate this and architecture
> > code is expected to set it.
> >
> > To handle page fault for such memslot, the handling logic is different
> > depending on whether the fault is private or shared. KVM checks if
> > 'is_private' matches the host's view of the page (maintained in
> > mem_attr_array).
> >   - For a successful match, private pfn is obtained with
> > restrictedmem_get_page() and shared pfn is obtained with existing
> > get_user_pages().
> >   - For a failed match, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
> > userspace. Userspace then can convert memory between private/shared
> > in host's view and retry the fault.
> >
> > Co-developed-by: Yu Zhang 
> > Signed-off-by: Yu Zhang 
> > Signed-off-by: Chao Peng 
> > ---
> >  arch/x86/kvm/mmu/mmu.c  | 63 +++--
> >  arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
> >  arch/x86/kvm/mmu/mmutrace.h |  1 +
> >  arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
> >  include/linux/kvm_host.h| 30 
> >  5 files changed, 105 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 2190fd8c95c0..b1953ebc012e 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, 
> > gfn_t gfn,
> >
> >  int kvm_mmu_max_mapping_level(struct kvm *kvm,
> >   const struct kvm_memory_slot *slot, gfn_t gfn,
> > - int max_level)
> > + int max_level, bool is_private)
> >  {
> > struct kvm_lpage_info *linfo;
> > int host_level;
> > @@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
> > break;
> > }
> >
> > +   if (is_private)
> > +   return max_level;
> > +
> > if (max_level == PG_LEVEL_4K)
> > return PG_LEVEL_4K;
> >
> > @@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, 
> > struct kvm_page_fault *fault
> >  * level, which will be used to do precise, accurate accounting.
> >  */
> > fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
> > -fault->gfn, 
> > fault->max_level);
> > +fault->gfn, 
> > fault->max_level,
> > +fault->is_private);
> > if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
> > return;
> >
> > @@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu 
> > *vcpu, struct kvm_async_pf *work)
> > kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
> >  }
> >
> > +static inline u8 order_to_level(int order)
> > +{
> > +   BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> > +
> > +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> > +   return PG_LEVEL_1G;
> > +
> > +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> > +   return PG_LEVEL_2M;
> > +
> > +   return PG_LEVEL_4K;
> > +}
> > +
> > +static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
> > +   struct kvm_page_fault *fault)
> > +{
> > +   vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
> > +   if (fault->is_private)
> > +   vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
> > +   else
> > +   vcpu->run->memory.flags = 0;
> > +   vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;
> 
> nit: As in previous patches, use helpers (for this and other similar
> shifts in this patch)?

Agreed.
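
For instance, assuming the existing gfn_to_gpa() helper is what's meant here:

	vcpu->run->memory.gpa = gfn_to_gpa(fault->gfn);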

> 
> > +   vcpu->run->memory.size = PAGE_SIZE;
> > +   return RET_PF_USER;
> > +}
> > +
> > +static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> > +  struct kvm_page_fault *fault)
> > +{
> > +   int order;
> > +   struct kvm_memory_slot *slot = fault->slot;
> > +
> > +   if (!kvm_slot_can_be_private(slot))
> > +   return kvm_do_memory_fault_exit(vcpu, fault);
> > +
> > +   if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))
> > +   return RET_PF_RETRY;
> > +
> > +   fault->max_level = min(order_to_level(order), fault->max_level);
> > +   fault->map_writable = !(slot->flags & KVM_MEM_READONLY);
> > +   return RET_PF_CONTINUE;
> > +}
> > +
> >  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault 
> > *fault)
> >  {
> > 

Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-09 Thread Fuad Tabba
Hi,

On Fri, Dec 2, 2022 at 6:19 AM Chao Peng  wrote:
>
> A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
> hva-based shared memory. Architecture code (like TDX code) can tell
> whether the on-going fault is private or not. This patch adds a
> 'is_private' field to kvm_page_fault to indicate this and architecture
> code is expected to set it.
>
> To handle page fault for such memslot, the handling logic is different
> depending on whether the fault is private or shared. KVM checks if
> 'is_private' matches the host's view of the page (maintained in
> mem_attr_array).
>   - For a successful match, private pfn is obtained with
> restrictedmem_get_page() and shared pfn is obtained with existing
> get_user_pages().
>   - For a failed match, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
> userspace. Userspace then can convert memory between private/shared
> in host's view and retry the fault.
>
> Co-developed-by: Yu Zhang 
> Signed-off-by: Yu Zhang 
> Signed-off-by: Chao Peng 
> ---
>  arch/x86/kvm/mmu/mmu.c  | 63 +++--
>  arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
>  arch/x86/kvm/mmu/mmutrace.h |  1 +
>  arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
>  include/linux/kvm_host.h| 30 
>  5 files changed, 105 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 2190fd8c95c0..b1953ebc012e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, 
> gfn_t gfn,
>
>  int kvm_mmu_max_mapping_level(struct kvm *kvm,
>   const struct kvm_memory_slot *slot, gfn_t gfn,
> - int max_level)
> + int max_level, bool is_private)
>  {
> struct kvm_lpage_info *linfo;
> int host_level;
> @@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
> break;
> }
>
> +   if (is_private)
> +   return max_level;
> +
> if (max_level == PG_LEVEL_4K)
> return PG_LEVEL_4K;
>
> @@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, 
> struct kvm_page_fault *fault
>  * level, which will be used to do precise, accurate accounting.
>  */
> fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
> -fault->gfn, 
> fault->max_level);
> +fault->gfn, 
> fault->max_level,
> +fault->is_private);
> if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
> return;
>
> @@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 
> struct kvm_async_pf *work)
> kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
>  }
>
> +static inline u8 order_to_level(int order)
> +{
> +   BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> +
> +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> +   return PG_LEVEL_1G;
> +
> +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> +   return PG_LEVEL_2M;
> +
> +   return PG_LEVEL_4K;
> +}
> +
> +static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
> +   struct kvm_page_fault *fault)
> +{
> +   vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
> +   if (fault->is_private)
> +   vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
> +   else
> +   vcpu->run->memory.flags = 0;
> +   vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;

nit: As in previous patches, use helpers (for this and other similar
shifts in this patch)?

> +   vcpu->run->memory.size = PAGE_SIZE;
> +   return RET_PF_USER;
> +}
> +
> +static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> +  struct kvm_page_fault *fault)
> +{
> +   int order;
> +   struct kvm_memory_slot *slot = fault->slot;
> +
> +   if (!kvm_slot_can_be_private(slot))
> +   return kvm_do_memory_fault_exit(vcpu, fault);
> +
> > +   if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))
> +   return RET_PF_RETRY;
> +
> +   fault->max_level = min(order_to_level(order), fault->max_level);
> +   fault->map_writable = !(slot->flags & KVM_MEM_READONLY);
> +   return RET_PF_CONTINUE;
> +}
> +
>  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault 
> *fault)
>  {
> struct kvm_memory_slot *slot = fault->slot;
> @@ -4210,6 +4257,12 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, 
> struct kvm_page_fault *fault)
> return RET_PF_EMULATE;
> }
>
> +   if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn))
> +   return kvm_do_memory_fault_exit(vcpu, fault);

Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-08 Thread Yuan Yao
On Thu, Dec 08, 2022 at 07:23:46PM +0800, Chao Peng wrote:
> On Thu, Dec 08, 2022 at 10:29:18AM +0800, Yuan Yao wrote:
> > On Fri, Dec 02, 2022 at 02:13:46PM +0800, Chao Peng wrote:
> > > A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
> > > hva-based shared memory. Architecture code (like TDX code) can tell
> > > whether the on-going fault is private or not. This patch adds a
> > > 'is_private' field to kvm_page_fault to indicate this and architecture
> > > code is expected to set it.
> > >
> > > To handle page fault for such memslot, the handling logic is different
> > > depending on whether the fault is private or shared. KVM checks if
> > > 'is_private' matches the host's view of the page (maintained in
> > > mem_attr_array).
> > >   - For a successful match, private pfn is obtained with
> > > restrictedmem_get_page() and shared pfn is obtained with existing
> > > get_user_pages().
> > >   - For a failed match, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
> > > userspace. Userspace then can convert memory between private/shared
> > > in host's view and retry the fault.
> > >
> > > Co-developed-by: Yu Zhang 
> > > Signed-off-by: Yu Zhang 
> > > Signed-off-by: Chao Peng 
> > > ---
> > >  arch/x86/kvm/mmu/mmu.c  | 63 +++--
> > >  arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
> > >  arch/x86/kvm/mmu/mmutrace.h |  1 +
> > >  arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
> > >  include/linux/kvm_host.h| 30 
> > >  5 files changed, 105 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index 2190fd8c95c0..b1953ebc012e 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, 
> > > gfn_t gfn,
> > >
> > >  int kvm_mmu_max_mapping_level(struct kvm *kvm,
> > > const struct kvm_memory_slot *slot, gfn_t gfn,
> > > -   int max_level)
> > > +   int max_level, bool is_private)
> > >  {
> > >   struct kvm_lpage_info *linfo;
> > >   int host_level;
> > > @@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
> > >   break;
> > >   }
> > >
> > > + if (is_private)
> > > + return max_level;
> >
> > lpage mixed information already saved, so is that possible
> > to query info->disallow_lpage without care 'is_private' ?
>
> Actually we already queried info->disallow_lpage just before this
> sentence. The check is needed because later in the function we call
> host_pfn_mapping_level() which is shared memory specific.

You're right. We can't get mapping-level info for a private page from
host_pfn_mapping_level().

>
> Thanks,
> Chao
> >
> > > +
> > >   if (max_level == PG_LEVEL_4K)
> > >   return PG_LEVEL_4K;
> > >
> > > @@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, 
> > > struct kvm_page_fault *fault
> > >* level, which will be used to do precise, accurate accounting.
> > >*/
> > >   fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
> > > -  fault->gfn, 
> > > fault->max_level);
> > > +  fault->gfn, 
> > > fault->max_level,
> > > +  fault->is_private);
> > >   if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
> > >   return;
> > >
> > > @@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu 
> > > *vcpu, struct kvm_async_pf *work)
> > >   kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
> > >  }
> > >
> > > +static inline u8 order_to_level(int order)
> > > +{
> > > + BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> > > +
> > > + if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> > > + return PG_LEVEL_1G;
> > > +
> > > + if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> > > + return PG_LEVEL_2M;
> > > +
> > > + return PG_LEVEL_4K;
> > > +}
> > > +
> > > +static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
> > > + struct kvm_page_fault *fault)
> > > +{
> > > + vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
> > > + if (fault->is_private)
> > > + vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
> > > + else
> > > + vcpu->run->memory.flags = 0;
> > > + vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;
> > > + vcpu->run->memory.size = PAGE_SIZE;
> > > + return RET_PF_USER;
> > > +}
> > > +
> > > +static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> > > +struct kvm_page_fault *fault)
> > > +{
> > > + int order;
> > > + struct kvm_memory_slot *slot = fault->slot;
> > > +
> > > + if (!kvm_slot_can_be_private(slot))
> > > + return kvm_do_memory_fault_exit(vcpu, fault);
> > > +
> > > + if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))

Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-08 Thread Chao Peng
On Thu, Dec 08, 2022 at 10:29:18AM +0800, Yuan Yao wrote:
> On Fri, Dec 02, 2022 at 02:13:46PM +0800, Chao Peng wrote:
> > A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
> > hva-based shared memory. Architecture code (like TDX code) can tell
> > whether the on-going fault is private or not. This patch adds a
> > 'is_private' field to kvm_page_fault to indicate this and architecture
> > code is expected to set it.
> >
> > To handle page fault for such memslot, the handling logic is different
> > depending on whether the fault is private or shared. KVM checks if
> > 'is_private' matches the host's view of the page (maintained in
> > mem_attr_array).
> >   - For a successful match, private pfn is obtained with
> > restrictedmem_get_page() and shared pfn is obtained with existing
> > get_user_pages().
> >   - For a failed match, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
> > userspace. Userspace then can convert memory between private/shared
> > in host's view and retry the fault.
> >
> > Co-developed-by: Yu Zhang 
> > Signed-off-by: Yu Zhang 
> > Signed-off-by: Chao Peng 
> > ---
> >  arch/x86/kvm/mmu/mmu.c  | 63 +++--
> >  arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
> >  arch/x86/kvm/mmu/mmutrace.h |  1 +
> >  arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
> >  include/linux/kvm_host.h| 30 
> >  5 files changed, 105 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 2190fd8c95c0..b1953ebc012e 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, 
> > gfn_t gfn,
> >
> >  int kvm_mmu_max_mapping_level(struct kvm *kvm,
> >   const struct kvm_memory_slot *slot, gfn_t gfn,
> > - int max_level)
> > + int max_level, bool is_private)
> >  {
> > struct kvm_lpage_info *linfo;
> > int host_level;
> > @@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
> > break;
> > }
> >
> > +   if (is_private)
> > +   return max_level;
> 
> lpage mixed information already saved, so is that possible
> to query info->disallow_lpage without care 'is_private' ?

Actually we have already queried info->disallow_lpage just before this
point. The check is needed because later in the function we call
host_pfn_mapping_level(), which is specific to shared memory.

Thanks,
Chao
> 
> > +
> > if (max_level == PG_LEVEL_4K)
> > return PG_LEVEL_4K;
> >
> > @@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, 
> > struct kvm_page_fault *fault
> >  * level, which will be used to do precise, accurate accounting.
> >  */
> > fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
> > -fault->gfn, 
> > fault->max_level);
> > +fault->gfn, 
> > fault->max_level,
> > +fault->is_private);
> > if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
> > return;
> >
> > @@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu 
> > *vcpu, struct kvm_async_pf *work)
> > kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
> >  }
> >
> > +static inline u8 order_to_level(int order)
> > +{
> > +   BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> > +
> > +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> > +   return PG_LEVEL_1G;
> > +
> > +   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> > +   return PG_LEVEL_2M;
> > +
> > +   return PG_LEVEL_4K;
> > +}
> > +
> > +static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
> > +   struct kvm_page_fault *fault)
> > +{
> > +   vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
> > +   if (fault->is_private)
> > +   vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
> > +   else
> > +   vcpu->run->memory.flags = 0;
> > +   vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;
> > +   vcpu->run->memory.size = PAGE_SIZE;
> > +   return RET_PF_USER;
> > +}
> > +
> > +static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> > +  struct kvm_page_fault *fault)
> > +{
> > +   int order;
> > +   struct kvm_memory_slot *slot = fault->slot;
> > +
> > +   if (!kvm_slot_can_be_private(slot))
> > +   return kvm_do_memory_fault_exit(vcpu, fault);
> > +
> > +   if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))
> > +   return RET_PF_RETRY;
> > +
> > +   fault->max_level = min(order_to_level(order), fault->max_level);
> > +   fault->map_writable = !(slot->flags & KVM_MEM_READONLY);
> > +   return RET_PF_CONTINUE;
> > +}
> > +
> >  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)

Re: [PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-07 Thread Yuan Yao
On Fri, Dec 02, 2022 at 02:13:46PM +0800, Chao Peng wrote:
> A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
> hva-based shared memory. Architecture code (like TDX code) can tell
> whether the on-going fault is private or not. This patch adds a
> 'is_private' field to kvm_page_fault to indicate this and architecture
> code is expected to set it.
>
> To handle page fault for such memslot, the handling logic is different
> depending on whether the fault is private or shared. KVM checks if
> 'is_private' matches the host's view of the page (maintained in
> mem_attr_array).
>   - For a successful match, private pfn is obtained with
> restrictedmem_get_page() and shared pfn is obtained with existing
> get_user_pages().
>   - For a failed match, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
> userspace. Userspace then can convert memory between private/shared
> in host's view and retry the fault.
>
> Co-developed-by: Yu Zhang 
> Signed-off-by: Yu Zhang 
> Signed-off-by: Chao Peng 
> ---
>  arch/x86/kvm/mmu/mmu.c  | 63 +++--
>  arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
>  arch/x86/kvm/mmu/mmutrace.h |  1 +
>  arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
>  include/linux/kvm_host.h| 30 
>  5 files changed, 105 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 2190fd8c95c0..b1953ebc012e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, 
> gfn_t gfn,
>
>  int kvm_mmu_max_mapping_level(struct kvm *kvm,
> const struct kvm_memory_slot *slot, gfn_t gfn,
> -   int max_level)
> +   int max_level, bool is_private)
>  {
>   struct kvm_lpage_info *linfo;
>   int host_level;
> @@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
>   break;
>   }
>
> + if (is_private)
> + return max_level;

The lpage mixed information is already saved, so is it possible to query
info->disallow_lpage without caring about 'is_private'?

> +
>   if (max_level == PG_LEVEL_4K)
>   return PG_LEVEL_4K;
>
> @@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, 
> struct kvm_page_fault *fault
>* level, which will be used to do precise, accurate accounting.
>*/
>   fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
> -  fault->gfn, 
> fault->max_level);
> +  fault->gfn, 
> fault->max_level,
> +  fault->is_private);
>   if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
>   return;
>
> @@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 
> struct kvm_async_pf *work)
>   kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
>  }
>
> +static inline u8 order_to_level(int order)
> +{
> + BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> +
> + if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> + return PG_LEVEL_1G;
> +
> + if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> + return PG_LEVEL_2M;
> +
> + return PG_LEVEL_4K;
> +}
> +
> +static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
> + struct kvm_page_fault *fault)
> +{
> + vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
> + if (fault->is_private)
> + vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
> + else
> + vcpu->run->memory.flags = 0;
> + vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;
> + vcpu->run->memory.size = PAGE_SIZE;
> + return RET_PF_USER;
> +}
> +
> +static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> +struct kvm_page_fault *fault)
> +{
> + int order;
> + struct kvm_memory_slot *slot = fault->slot;
> +
> + if (!kvm_slot_can_be_private(slot))
> + return kvm_do_memory_fault_exit(vcpu, fault);
> +
> + if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))
> + return RET_PF_RETRY;
> +
> + fault->max_level = min(order_to_level(order), fault->max_level);
> + fault->map_writable = !(slot->flags & KVM_MEM_READONLY);
> + return RET_PF_CONTINUE;
> +}
> +
>  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault 
> *fault)
>  {
>   struct kvm_memory_slot *slot = fault->slot;
> @@ -4210,6 +4257,12 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, 
> struct kvm_page_fault *fault)
>   return RET_PF_EMULATE;
>   }
>
> + if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn))
> + return kvm_do_memory_fault_exit(vcpu, fault);
> +
> + if (fault->is_private)
> + return kvm_faultin_pfn_private(vcpu, fault);

[PATCH v10 8/9] KVM: Handle page fault for private memory

2022-12-01 Thread Chao Peng
A KVM_MEM_PRIVATE memslot can include both fd-based private memory and
hva-based shared memory. Architecture code (like TDX code) can tell
whether the ongoing fault is private or not. This patch adds an
'is_private' field to kvm_page_fault to indicate this, and architecture
code is expected to set it.

To handle a page fault for such a memslot, the handling logic differs
depending on whether the fault is private or shared. KVM checks whether
'is_private' matches the host's view of the page (maintained in
mem_attr_array).
  - On a match, the private pfn is obtained with
    restrictedmem_get_page() and the shared pfn is obtained with the
    existing get_user_pages().
  - On a mismatch, KVM causes a KVM_EXIT_MEMORY_FAULT exit to
    userspace. Userspace can then convert the memory between
    private/shared in the host's view and retry the fault.

Co-developed-by: Yu Zhang 
Signed-off-by: Yu Zhang 
Signed-off-by: Chao Peng 
---
 arch/x86/kvm/mmu/mmu.c  | 63 +++--
 arch/x86/kvm/mmu/mmu_internal.h | 14 +++-
 arch/x86/kvm/mmu/mmutrace.h |  1 +
 arch/x86/kvm/mmu/tdp_mmu.c  |  2 +-
 include/linux/kvm_host.h| 30 
 5 files changed, 105 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2190fd8c95c0..b1953ebc012e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3058,7 +3058,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
 
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
  const struct kvm_memory_slot *slot, gfn_t gfn,
- int max_level)
+ int max_level, bool is_private)
 {
struct kvm_lpage_info *linfo;
int host_level;
@@ -3070,6 +3070,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
break;
}
 
+   if (is_private)
+   return max_level;
+
if (max_level == PG_LEVEL_4K)
return PG_LEVEL_4K;
 
@@ -3098,7 +3101,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 * level, which will be used to do precise, accurate accounting.
 */
 	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
-						     fault->gfn, fault->max_level);
+						     fault->gfn, fault->max_level,
+						     fault->is_private);
if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
return;
 
@@ -4178,6 +4182,49 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
+static inline u8 order_to_level(int order)
+{
+   BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
+
+   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
+   return PG_LEVEL_1G;
+
+   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+   return PG_LEVEL_2M;
+
+   return PG_LEVEL_4K;
+}
+
+static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
+   struct kvm_page_fault *fault)
+{
+   vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+   if (fault->is_private)
+   vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
+   else
+   vcpu->run->memory.flags = 0;
+   vcpu->run->memory.gpa = fault->gfn << PAGE_SHIFT;
+   vcpu->run->memory.size = PAGE_SIZE;
+   return RET_PF_USER;
+}
+
+static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
+  struct kvm_page_fault *fault)
+{
+   int order;
+   struct kvm_memory_slot *slot = fault->slot;
+
+   if (!kvm_slot_can_be_private(slot))
+   return kvm_do_memory_fault_exit(vcpu, fault);
+
+   if (kvm_restricted_mem_get_pfn(slot, fault->gfn, &fault->pfn, &order))
+   return RET_PF_RETRY;
+
+   fault->max_level = min(order_to_level(order), fault->max_level);
+   fault->map_writable = !(slot->flags & KVM_MEM_READONLY);
+   return RET_PF_CONTINUE;
+}
+
 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
struct kvm_memory_slot *slot = fault->slot;
@@ -4210,6 +4257,12 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
return RET_PF_EMULATE;
}
 
+   if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn))
+   return kvm_do_memory_fault_exit(vcpu, fault);
+
+   if (fault->is_private)
+   return kvm_faultin_pfn_private(vcpu, fault);
+
async = false;
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
					  fault->write, &fault->map_writable,
@@ -5599,6 +5652,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err