Decouple hugepage_has_attrs() from struct kvm's mem_attr_array to
allow other memory attribute sources to use the function.

Signed-off-by: Nicolas Saenz Julienne <nsa...@amazon.com>
---
 arch/x86/kvm/mmu/mmu.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4ace2f8660b0..c0fd3afd6be5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7290,19 +7290,19 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
        lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
 }
 
-static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
-                              gfn_t gfn, int level, unsigned long attrs)
+static bool hugepage_has_attrs(struct xarray *mem_attr_array,
+                              struct kvm_memory_slot *slot, gfn_t gfn,
+                              int level, unsigned long attrs)
 {
        const unsigned long start = gfn;
        const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
 
        if (level == PG_LEVEL_2M)
-               return kvm_range_has_memory_attributes(&kvm->mem_attr_array,
-                                                      start, end, attrs);
+               return kvm_range_has_memory_attributes(mem_attr_array, start, end, attrs);
 
        for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
                if (hugepage_test_mixed(slot, gfn, level - 1) ||
-                   attrs != kvm_get_memory_attributes(&kvm->mem_attr_array, gfn))
+                   attrs != kvm_get_memory_attributes(mem_attr_array, gfn))
                        return false;
        }
        return true;
@@ -7344,7 +7344,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
                         * misaligned address regardless of memory attributes.
                         */
                        if (gfn >= slot->base_gfn) {
-                               if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                               if (hugepage_has_attrs(&kvm->mem_attr_array,
+                                                      slot, gfn, level, attrs))
                                        hugepage_clear_mixed(slot, gfn, level);
                                else
                                        hugepage_set_mixed(slot, gfn, level);
@@ -7366,7 +7367,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
                 */
                if (gfn < range->end &&
                    (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
-                       if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                       if (hugepage_has_attrs(&kvm->mem_attr_array, slot, gfn,
+                                              level, attrs))
                                hugepage_clear_mixed(slot, gfn, level);
                        else
                                hugepage_set_mixed(slot, gfn, level);
@@ -7405,7 +7407,7 @@ void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
                        unsigned long attrs =
                                kvm_get_memory_attributes(&kvm->mem_attr_array, gfn);
 
-                       if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                       if (hugepage_has_attrs(&kvm->mem_attr_array, slot, gfn, level, attrs))
                                hugepage_clear_mixed(slot, gfn, level);
                        else
                                hugepage_set_mixed(slot, gfn, level);
-- 
2.40.1


Reply via email to