Re: [PATCH v2 4/6] KVM: Delete the now unused kvm_arch_sched_in()

2024-05-23 Thread maobibo




On 2024/5/22 9:40 AM, Sean Christopherson wrote:

Delete kvm_arch_sched_in() now that all implementations are nops.

Signed-off-by: Sean Christopherson 
---
  arch/arm64/include/asm/kvm_host.h     | 1 -
  arch/loongarch/include/asm/kvm_host.h | 1 -
  arch/mips/include/asm/kvm_host.h      | 1 -
  arch/powerpc/include/asm/kvm_host.h   | 1 -
  arch/riscv/include/asm/kvm_host.h     | 1 -
  arch/s390/include/asm/kvm_host.h      | 1 -
  arch/x86/kvm/pmu.c                    | 6 +++---
  arch/x86/kvm/x86.c                    | 5 -----
  include/linux/kvm_host.h              | 2 --
  virt/kvm/kvm_main.c                   | 1 -
  10 files changed, 3 insertions(+), 17 deletions(-)
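
For reference, the hook being deleted was reached from KVM's preempt notifier
on sched-in; a minimal sketch of the pre-patch call site in
virt/kvm/kvm_main.c (abbreviated, not a verbatim excerpt):

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->preempted, false);
	WRITE_ONCE(vcpu->ready, false);

	__this_cpu_write(kvm_running_vcpu, vcpu);
	kvm_arch_sched_in(vcpu, cpu);	/* now a nop on every architecture */
	kvm_arch_vcpu_load(vcpu, cpu);
}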

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04fde91..615e7a2e5590 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1225,7 +1225,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
  }
  
  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  
  void kvm_arm_init_debug(void);
  void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index c87b6ea0ec47..4162a252cdf6 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -261,7 +261,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
  static inline void kvm_arch_hardware_unsetup(void) {}
  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
  static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
  static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 179f320cc231..6743a57c1ab4 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -890,7 +890,6 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
  static inline void kvm_arch_free_memslot(struct kvm *kvm,
 struct kvm_memory_slot *slot) {}
  static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
  static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
  
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8abac532146e..c4fb6a27fb92 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -897,7 +897,6 @@ struct kvm_vcpu_arch {
  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
  static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
  static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
  static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
  
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index d96281278586..dd77c2db6819 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -286,7 +286,6 @@ struct kvm_vcpu_arch {
  };
  
  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  
  #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
  
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95990461888f..e9fcaf4607a6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1045,7 +1045,6 @@ extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
  extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
  
  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  static inline void kvm_arch_free_memslot(struct kvm *kvm,
 struct kvm_memory_slot *slot) {}
  static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a593b03c9aed..f9149c9fc275 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -521,9 +521,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
}
  
  	/*
-	 * Unused perf_events are only released if the corresponding MSRs
-	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
-	 * triggers KVM_REQ_PMU if cleanup is needed.
+	 * Release unused perf_events 
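
For context, the comment being reworded sits directly above the cleanup
check at the end of kvm_pmu_handle_event(); a minimal sketch of that check,
assuming the upstream pre-patch shape:

	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);	/* release perf_events for PMCs left idle */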

Re: [PATCH 1/4] KVM: delete .change_pte MMU notifier callback

2024-04-08 Thread maobibo




On 2024/4/5 7:58 PM, Paolo Bonzini wrote:

The .change_pte() MMU notifier callback was intended as an
optimization. The original point of it was that KSM could tell KVM to flip
its secondary PTE to a new location without having to first zap it. At
the time there was also an .invalidate_page() callback; both of them were
*not* bracketed by calls to mmu_notifier_invalidate_range_{start,end}(),
and .invalidate_page() also doubled as a fallback implementation of
change_pte().

Later on, however, both callbacks were changed to occur within an
invalidate_range_start/end() block.

In the case of .change_pte(), commit 6bdb913f0a70 ("mm: wrap calls to
set_pte_at_notify with invalidate_range_start and invalidate_range_end",
2012-10-09) did so to remove the fallback from .invalidate_page() to
change_pte() and allow sleepable .invalidate_page() hooks.

This, however, made KVM's usage of the .change_pte() callback completely
moot, because KVM unmaps the sPTEs during .invalidate_range_start()
and therefore .change_pte() has no hope of finding an sPTE to change.
Drop the generic KVM code that dispatches to kvm_set_spte_gfn(), as
well as all the architecture specific implementations.
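
To make the ordering concrete, a simplified sketch of the post-6bdb913f0a70
call sequence (illustrative only, not verbatim kernel code) showing why
kvm_set_spte_gfn() can never find a live sPTE:

mmu_notifier_invalidate_range_start(&range);	/* KVM zaps sPTEs for the range */
set_pte_at_notify(mm, addr, ptep, newpte);	/* fires ->change_pte() */
	/* -> kvm_set_spte_gfn(): the sPTE was already zapped above,
	 *    so there is nothing left to update. */
mmu_notifier_invalidate_range_end(&range);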

Signed-off-by: Paolo Bonzini 
---
  arch/arm64/kvm/mmu.c                  | 34 -
  arch/loongarch/include/asm/kvm_host.h |  1 -
  arch/loongarch/kvm/mmu.c              | 32 
  arch/mips/kvm/mmu.c                   | 30 ---
  arch/powerpc/include/asm/kvm_ppc.h    |  1 -
  arch/powerpc/kvm/book3s.c             |  5 ---
  arch/powerpc/kvm/book3s.h             |  1 -
  arch/powerpc/kvm/book3s_64_mmu_hv.c   | 12 --
  arch/powerpc/kvm/book3s_hv.c          |  1 -
  arch/powerpc/kvm/book3s_pr.c          |  7 
  arch/powerpc/kvm/e500_mmu_host.c      |  6 ---
  arch/riscv/kvm/mmu.c                  | 20 --
  arch/x86/kvm/mmu/mmu.c                | 54 +--
  arch/x86/kvm/mmu/spte.c               | 16 
  arch/x86/kvm/mmu/spte.h               |  2 -
  arch/x86/kvm/mmu/tdp_mmu.c            | 46 ---
  arch/x86/kvm/mmu/tdp_mmu.h            |  1 -
  include/linux/kvm_host.h              |  2 -
  include/trace/events/kvm.h            | 15 
  virt/kvm/kvm_main.c                   | 43 -
  20 files changed, 2 insertions(+), 327 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index dc04bc767865..ff17849be9f4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1768,40 +1768,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
  }
  
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
-   kvm_pfn_t pfn = pte_pfn(range->arg.pte);
-
-   if (!kvm->arch.mmu.pgt)
-   return false;
-
-   WARN_ON(range->end - range->start != 1);
-
-   /*
-* If the page isn't tagged, defer to user_mem_abort() for sanitising
-* the MTE tags. The S2 pte should have been unmapped by
-* mmu_notifier_invalidate_range_end().
-*/
-   if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
-   return false;
-
-   /*
-* We've moved a page around, probably through CoW, so let's treat
-* it just like a translation fault and the map handler will clean
-* the cache to the PoC.
-*
-* The MMU notifiers will have unmapped a huge PMD before calling
-* ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
-* therefore we never need to clear out a huge PMD through this
-* calling path and a memcache is not required.
-*/
-   kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
-  PAGE_SIZE, __pfn_to_phys(pfn),
-  KVM_PGTABLE_PROT_R, NULL, 0);
-
-   return false;
-}
-
  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
  {
u64 size = (range->end - range->start) << PAGE_SHIFT;
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 2d62f7b0d377..69305441f40d 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -203,7 +203,6 @@ void kvm_flush_tlb_all(void);
  void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
  int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
  
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
  int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
  int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
  int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index a556cff35740..98883aa23ab8 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -494,38 +494,6 @@ bool kvm_unmap_gfn_range(struct 

Re: [PATCH 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-20 Thread maobibo



On 04/19/2020 03:51 PM, Tianjia Zhang wrote:
> In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
> structure. For historical reasons, many kvm-related functions still retain
> both the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This patch
> does a unified cleanup of these remaining redundant parameters.
> 
> Signed-off-by: Tianjia Zhang 
> ---
>  arch/mips/include/asm/kvm_host.h |  4 ++--
>  arch/mips/kvm/entry.c            | 15 +++++----------
>  arch/mips/kvm/mips.c             |  3 ++-
>  arch/mips/kvm/trap_emul.c        |  2 +-
>  arch/mips/kvm/vz.c               |  2 +-
>  5 files changed, 11 insertions(+), 15 deletions(-)
> 
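
Since kvm_run is embedded in kvm_vcpu and always reachable as vcpu->run, the
whole series boils down to one pattern; an abbreviated sketch (the body and
return value are illustrative, not quoted from the patch):

/* Before: both pointers passed, although run is always vcpu->run. */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* After: pass only the vcpu; the callee derives run locally. */
int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/* ... handle the exit using run and vcpu ... */
	return RESUME_GUEST;	/* illustrative return value */
}

The same derivation appears verbatim in the kvm_mips_handle_exit() hunk
quoted below.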
> diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
> index 971439297cea..db915c55166d 100644
> --- a/arch/mips/include/asm/kvm_host.h
> +++ b/arch/mips/include/asm/kvm_host.h
> @@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
>  #define KVM_MIPS_GUEST_TLB_SIZE  64
>  struct kvm_vcpu_arch {
>   void *guest_ebase;
> - int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> + int (*vcpu_run)(struct kvm_vcpu *vcpu);
>  
>   /* Host registers preserved across guest mode execution */
>   unsigned long host_stack;
> @@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
>  /* Debug: dump vcpu state */
>  int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>  
> -extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
>  
>  /* Building of entry/exception code */
>  int kvm_mips_entry_setup(void);
> diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
> index 16e1c93b484f..e3f29af3b6cd 100644
> --- a/arch/mips/kvm/entry.c
> +++ b/arch/mips/kvm/entry.c
> @@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg)
>   * Assemble the start of the vcpu_run function to run a guest VCPU. The function
>   * conforms to the following prototype:
>   *
> - * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
> + * int vcpu_run(struct kvm_vcpu *vcpu);
>   *
>   * The exit from the guest and return to the caller is handled by the code
>   * generated by kvm_mips_build_ret_to_host().
> @@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
>   unsigned int i;
>  
>   /*
> -  * A0: run
> -  * A1: vcpu
> +  * A0: vcpu
>   */
>  
>   /* k0/k1 not being used in host kernel context */
> @@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
> 	kvm_mips_build_save_scratch(&p, V1, K1);
> 
> 	/* VCPU scratch register has pointer to vcpu */
> - UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
> + UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
> 
> 	/* Offset into vcpu->arch */
> - UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
> + UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
>  
>   /*
>* Save the host stack to VCPU, used for exception processing
> @@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
> 	/* Now that context has been saved, we can use other registers */
> 
> 	/* Restore vcpu */
> - UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
> -
> - /* Restore run (vcpu->run) */
> - UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
> + UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
>  
>   /*
>* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
> @@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
> 	 * with this in the kernel
> 	 */
> 	uasm_i_move(&p, A0, S0);
> - uasm_i_move(&p, A1, S1);
> 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
> 	uasm_i_jalr(&p, RA, T9);
> 	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

I suggest keeping the asm code untouched; the change to the C code is much
easier to understand. However, I do not see an obvious advantage in removing
one redundant function parameter :)


regards
bibo,mao


> diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> index 9710477a9827..32850470c037 100644
> --- a/arch/mips/kvm/mips.c
> +++ b/arch/mips/kvm/mips.c
> @@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
>  /*
>   * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
>   */
> -int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
> +int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
>  {
> + struct kvm_run *run = vcpu->run;
>   u32 cause = vcpu->arch.host_cp0_cause;
>   u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
>   u32 __user *opc = (u32 __user *) vcpu->arch.pc;
> diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
> index d822f3aee3dc..04c864cc356a 100644
> --- a/arch/mips/kvm/trap_emul.c
> +++ b/arch/mips/kvm/trap_emul.c
> @@ -1238,7 +1238,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)