Collapse kvm_flush_remote_tlbs_with_range() and
kvm_flush_remote_tlbs_with_address() into a single function. This
eliminates some lines of code and a useless NULL check on the range
struct.

Opportunistically switch from ENOTSUPP to EOPNOTSUPP to make checkpatch
happy.

Signed-off-by: David Matlack <dmatl...@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f01ee01f3509..b7bbabac9127 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -244,27 +244,20 @@ static inline bool kvm_available_flush_tlb_with_range(void)
        return kvm_x86_ops.tlb_remote_flush_with_range;
 }
 
-static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
-               struct kvm_tlb_range *range)
-{
-       int ret = -ENOTSUPP;
-
-       if (range && kvm_x86_ops.tlb_remote_flush_with_range)
-       ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
-
-       if (ret)
-               kvm_flush_remote_tlbs(kvm);
-}
-
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
                u64 start_gfn, u64 pages)
 {
        struct kvm_tlb_range range;
+       int ret = -EOPNOTSUPP;
 
        range.start_gfn = start_gfn;
        range.pages = pages;
 
-       kvm_flush_remote_tlbs_with_range(kvm, &range);
+       if (kvm_x86_ops.tlb_remote_flush_with_range)
+               ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range);
+
+       if (ret)
+               kvm_flush_remote_tlbs(kvm);
 }
 
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-- 
2.39.0.rc1.256.g54fd8350bd-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to