Signed-off-by: Adalbert Lazăr <ala...@bitdefender.com>
---
 arch/x86/kvm/mmu/mmu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 97766f34910d..f3ba4d0452c9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2573,6 +2573,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        bool flush = false;
        int collisions = 0;
        LIST_HEAD(invalid_list);
+       unsigned int pg_hash;
 
        role = vcpu->arch.mmu->mmu_role.base;
        role.level = level;
@@ -2623,8 +2624,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
        sp->gfn = gfn;
        sp->role = role;
+       pg_hash = kvm_page_table_hashfn(gfn);
        hlist_add_head(&sp->hash_link,
-               &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
+               &vcpu->kvm->arch.mmu_page_hash[pg_hash]);
        if (!direct) {
                /*
                 * we should do write protection before syncing pages