Hi,

Any update on this patch? We could drop patch 3. Any feedback on patches
1 and 2?

-aneesh

"Aneesh Kumar K.V" <aneesh.ku...@linux.vnet.ibm.com> writes:

> This patch adds helper routines for unlocking HPTEs and uses them
> throughout the rest of the code. We don't change any locking rules in
> this patch. In the next patch we switch some of the unlock sites to
> the variant with a barrier and also document the usage without barriers.
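>
> As a rough usage sketch (illustrative only, not part of this diff;
> hptep, v and r stand in for whatever the call site has in hand):
>
>       /*
>        * Stores made while holding HPTE_V_HVLOCK must be visible
>        * before the lock bit clears, so the default release takes
>        * a barrier:
>        */
>       unlock_hpte(hptep, v);          /* PPC_RELEASE_BARRIER, then store */
>
>       /*
>        * Where stronger ordering already exists around the hpte[1]
>        * update, or nothing was modified under the lock, the plain
>        * store is enough:
>        */
>       hptep[1] = cpu_to_be64(r);
>       eieio();                        /* order hpte[1] before hpte[0] */
>       __unlock_hpte(hptep, v);        /* no extra barrier */
>       asm volatile("ptesync" : : : "memory");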
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/kvm_book3s_64.h | 14 ++++++++++++++
>  arch/powerpc/kvm/book3s_64_mmu_hv.c      | 25 ++++++++++---------------
>  arch/powerpc/kvm/book3s_hv_rm_mmu.c      | 27 ++++++++++-----------------
>  3 files changed, 34 insertions(+), 32 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
> index 0aa817933e6a..ec9fb6085843 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_64.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
> @@ -86,6 +86,20 @@ static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
>       return old == 0;
>  }
>  
> +static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
> +{
> +     hpte_v &= ~HPTE_V_HVLOCK;
> +     asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
> +     hpte[0] = cpu_to_be64(hpte_v);
> +}
> +
> +/* Like unlock_hpte(), but without the release barrier */
> +static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
> +{
> +     hpte_v &= ~HPTE_V_HVLOCK;
> +     hpte[0] = cpu_to_be64(hpte_v);
> +}
> +
>  static inline int __hpte_actual_psize(unsigned int lp, int psize)
>  {
>       int i, shift;
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> index cebb86bc4a37..5ea4b2b6a157 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> @@ -475,9 +475,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
>       v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
>       gr = kvm->arch.revmap[index].guest_rpte;
>  
> -     /* Unlock the HPTE */
> -     asm volatile("lwsync" : : : "memory");
> -     hptep[0] = cpu_to_be64(v);
> +     unlock_hpte(hptep, v);
>       preempt_enable();
>  
>       gpte->eaddr = eaddr;
> @@ -606,8 +604,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>       hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
>       hpte[1] = be64_to_cpu(hptep[1]);
>       hpte[2] = r = rev->guest_rpte;
> -     asm volatile("lwsync" : : : "memory");
> -     hptep[0] = cpu_to_be64(hpte[0]);
> +     unlock_hpte(hptep, hpte[0]);
>       preempt_enable();
>  
>       if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
> @@ -758,7 +755,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>  
>       hptep[1] = cpu_to_be64(r);
>       eieio();
> -     hptep[0] = cpu_to_be64(hpte[0]);
> +     __unlock_hpte(hptep, hpte[0]);
>       asm volatile("ptesync" : : : "memory");
>       preempt_enable();
>       if (page && hpte_is_writable(r))
> @@ -777,7 +774,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
>       return ret;
>  
>   out_unlock:
> -     hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +     __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
>       preempt_enable();
>       goto out_put;
>  }
> @@ -907,7 +904,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
>                       }
>               }
>               unlock_rmap(rmapp);
> -             hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +             __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
>       }
>       return 0;
>  }
> @@ -995,7 +992,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
>                       }
>                       ret = 1;
>               }
> -             hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +             __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
>       } while ((i = j) != head);
>  
>       unlock_rmap(rmapp);
> @@ -1118,8 +1115,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
>  
>               /* Now check and modify the HPTE */
>               if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
> -                     /* unlock and continue */
> -                     hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +                     __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
>                       continue;
>               }
>               /* need to make it temporarily absent so C is stable */
> @@ -1139,9 +1135,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
>                               npages_dirty = n;
>                       eieio();
>               }
> -             v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
> +             v &= ~HPTE_V_ABSENT;
>               v |= HPTE_V_VALID;
> -             hptep[0] = cpu_to_be64(v);
> +             __unlock_hpte(hptep, v);
>       } while ((i = j) != head);
>  
>       unlock_rmap(rmapp);
> @@ -1379,8 +1375,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
>                       r &= ~HPTE_GR_MODIFIED;
>                       revp->guest_rpte = r;
>               }
> -             asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
> -             hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +             unlock_hpte(hptp, be64_to_cpu(hptp[0]));
>               preempt_enable();
>               if (!(valid == want_valid && (first_pass || dirty)))
>                       ok = 0;
> diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> index 084ad54c73cd..769a5d4c0430 100644
> --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> @@ -154,12 +154,6 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
>       return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
>  }
>  
> -static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
> -{
> -     asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
> -     hpte[0] = cpu_to_be64(hpte_v);
> -}
> -
>  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>                      long pte_index, unsigned long pteh, unsigned long ptel,
>                      pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
> @@ -295,10 +289,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>                               u64 pte;
>                               while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
>                                       cpu_relax();
> -                             pte = be64_to_cpu(*hpte);
> +                             pte = be64_to_cpu(hpte[0]);
>                               if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
>                                       break;
> -                             *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +                             __unlock_hpte(hpte, pte);
>                               hpte += 2;
>                       }
>                       if (i == 8)
> @@ -314,9 +308,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>  
>                       while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
>                               cpu_relax();
> -                     pte = be64_to_cpu(*hpte);
> +                     pte = be64_to_cpu(hpte[0]);
>                       if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
> -                             *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +                             __unlock_hpte(hpte, pte);
>                               return H_PTEG_FULL;
>                       }
>               }
> @@ -356,7 +350,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>  
>       /* Write the first HPTE dword, unlocking the HPTE and making it valid */
>       eieio();
> -     hpte[0] = cpu_to_be64(pteh);
> +     __unlock_hpte(hpte, pteh);
>       asm volatile("ptesync" : : : "memory");
>  
>       *pte_idx_ret = pte_index;
> @@ -487,7 +481,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
>       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
>           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
>           ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
> -             hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +             __unlock_hpte(hpte, pte);
>               return H_NOT_FOUND;
>       }
>  
> @@ -623,7 +617,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
>                               be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
>                       rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
>                       args[j] |= rcbits << (56 - 5);
> -                     hp[0] = 0;
> +                     __unlock_hpte(hp, 0);
>               }
>       }
>  
> @@ -649,7 +643,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
>       pte = be64_to_cpu(hpte[0]);
>       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
>           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
> -             hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
> +             __unlock_hpte(hpte, pte);
>               return H_NOT_FOUND;
>       }
>  
> @@ -700,7 +694,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
>       }
>       hpte[1] = cpu_to_be64(r);
>       eieio();
> -     hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
> +     __unlock_hpte(hpte, v);
>       asm volatile("ptesync" : : : "memory");
>       return H_SUCCESS;
>  }
> @@ -841,8 +835,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
>                               /* Return with the HPTE still locked */
>                               return (hash << 3) + (i >> 1);
>  
> -                     /* Unlock and move on */
> -                     hpte[i] = cpu_to_be64(v);
> +                     __unlock_hpte(&hpte[i], v);
>               }
>  
>               if (val & HPTE_V_SECONDARY)
> -- 
> 1.9.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majord...@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to