This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have a single
exit path. This allows the next patch to add locking cleanly.

This moves the ioba boundary check to a helper and adds a check that the
least significant bits of ioba (the in-page offset) are zero.

The patch is mostly mechanical (only the check for the least significant
ioba bits is new), so no change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy <a...@ozlabs.ru>
---
 arch/powerpc/kvm/book3s_64_vio_hv.c | 102 +++++++++++++++++++++++-------------
 1 file changed, 66 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 89e96b3..8ae12ac 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -35,71 +35,101 @@
 #include <asm/ppc-opcode.h>
 #include <asm/kvm_host.h>
 #include <asm/udbg.h>
+#include <asm/iommu.h>
 
 #define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
 
+/*
+ * Finds a TCE table descriptor by LIOBN.
+ *
+ * WARNING: This will be called in real or virtual mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+               unsigned long liobn)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvmppc_spapr_tce_table *stt;
+
+       list_for_each_entry_rcu_notrace(stt, &kvm->arch.spapr_tce_tables, list)
+               if (stt->liobn == liobn)
+                       return stt;
+
+       return NULL;
+}
+
+/*
+ * Validates an IO address: ioba must be aligned to a 4K IOMMU page and
+ * the npages-long range starting at it must fit in the DMA window.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+               unsigned long ioba, unsigned long npages)
+{
+       unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
+       unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
+       unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
+
+       /* Reject unaligned ioba or a range running past the window end */
+       if ((ioba & mask) || (idx + npages > size))
+               return H_PARAMETER;
+
+       return H_SUCCESS;
+}
+
 /* WARNING: This will be called in real-mode on HV KVM and virtual
  *          mode on PR KVM
  */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct kvmppc_spapr_tce_table *stt;
+       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       long ret = H_TOO_HARD;
+       unsigned long idx;
+       struct page *page;
+       u64 *tbl;
 
        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */
 
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == liobn) {
-                       unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-                       struct page *page;
-                       u64 *tbl;
+       if (!stt)
+               return ret;
 
-                       /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
-                       /*          liobn, stt, stt->window_size); */
-                       if (ioba >= stt->window_size)
-                               return H_PARAMETER;
+       ret = kvmppc_ioba_validate(stt, ioba, 1);
+       if (ret)
+               return ret;
 
-                       page = stt->pages[idx / TCES_PER_PAGE];
-                       tbl = (u64 *)page_address(page);
+       idx = ioba >> SPAPR_TCE_SHIFT;
+       page = stt->pages[idx / TCES_PER_PAGE];
+       tbl = (u64 *)page_address(page);
 
-                       /* FIXME: Need to validate the TCE itself */
-                       /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
-                       tbl[idx % TCES_PER_PAGE] = tce;
-                       return H_SUCCESS;
-               }
-       }
+       /* FIXME: Need to validate the TCE itself */
+       /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+       tbl[idx % TCES_PER_PAGE] = tce;
 
-       /* Didn't find the liobn, punt it to userspace */
-       return H_TOO_HARD;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct kvmppc_spapr_tce_table *stt;
+       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       long ret = H_TOO_HARD;
 
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == liobn) {
+
+       if (stt) {
+               ret = kvmppc_ioba_validate(stt, ioba, 1);
+               if (!ret) {
                        unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-                       struct page *page;
-                       u64 *tbl;
-
-                       if (ioba >= stt->window_size)
-                               return H_PARAMETER;
-
-                       page = stt->pages[idx / TCES_PER_PAGE];
-                       tbl = (u64 *)page_address(page);
+                       struct page *page = stt->pages[idx / TCES_PER_PAGE];
+                       u64 *tbl = (u64 *)page_address(page);
 
                        vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
-                       return H_SUCCESS;
                }
        }
 
-       /* Didn't find the liobn, punt it to userspace */
-       return H_TOO_HARD;
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
-- 
2.4.0.rc3.8.gfb3e7d5

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to