Re: [PATCH kernel 3/9] KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers

2015-12-07 Thread David Gibson
On Tue, Sep 15, 2015 at 08:49:33PM +1000, Alexey Kardashevskiy wrote:
> This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have one
> exit path. This allows the next patch to add locks nicely.

I don't see a problem with the actual code, but it doesn't seem to
match this description: I still see multiple return statements for
h_put_tce at least.
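
[Editor's illustration, not code from the series: a true single-exit
version of kvmppc_h_put_tce, built only from the helpers this patch
introduces, might look roughly like this sketch.]

long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	long ret = H_TOO_HARD;

	if (stt) {
		/* helper introduced by this patch; H_SUCCESS is 0 */
		ret = kvmppc_ioba_validate(stt, ioba, 1);
		if (ret == H_SUCCESS) {
			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
			struct page *page = stt->pages[idx / TCES_PER_PAGE];
			u64 *tbl = (u64 *)page_address(page);

			tbl[idx % TCES_PER_PAGE] = tce;
		}
	}

	return ret;	/* the only return statement */
}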

> This moves the ioba boundary check to a helper and adds a check for
> the least significant bits, which have to be zero.
> 
> The patch is pretty mechanical (only the check for the least
> significant ioba bits is added), so no change in behaviour is expected.
> 
> Signed-off-by: Alexey Kardashevskiy 
> ---
>  arch/powerpc/kvm/book3s_64_vio_hv.c | 102 +++-
>  1 file changed, 66 insertions(+), 36 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
> index 89e96b3..8ae12ac 100644
> --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
> @@ -35,71 +35,101 @@
>  #include 
>  #include 
>  #include 
> +#include <asm/iommu.h>
>  
>  #define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))
>  
> +/*
> + * Finds a TCE table descriptor by LIOBN.
> + *
> + * WARNING: This will be called in real or virtual mode on HV KVM and virtual
> + *  mode on PR KVM
> + */
> +static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
> + unsigned long liobn)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + struct kvmppc_spapr_tce_table *stt;
> +
> + list_for_each_entry_rcu_notrace(stt, &kvm->arch.spapr_tce_tables, list)
> + if (stt->liobn == liobn)
> + return stt;
> +
> + return NULL;
> +}
> +
> +/*
> + * Validates IO address.
> + *
> + * WARNING: This will be called in real-mode on HV KVM and virtual
> + *  mode on PR KVM
> + */
> +static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
> + unsigned long ioba, unsigned long npages)
> +{
> + unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
> + unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
> + unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
> +
> + if ((ioba & mask) || (size + npages <= idx))
> + return H_PARAMETER;

Not sure if it's worth a check for overflow in (size+npages) there.
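
[Editor's sketch of such a guard - an explicit wrap check on the sum,
with everything staying unsigned long and the rest of the condition as
posted:]

	if ((ioba & mask) || (size + npages < size) || (size + npages <= idx))
		return H_PARAMETER;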

> +
> + return H_SUCCESS;
> +}
> +
>  /* WARNING: This will be called in real-mode on HV KVM and virtual
>   *  mode on PR KVM
>   */
>  long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> unsigned long ioba, unsigned long tce)
>  {
> - struct kvm *kvm = vcpu->kvm;
> - struct kvmppc_spapr_tce_table *stt;
> + struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> + long ret = H_TOO_HARD;
> + unsigned long idx;
> + struct page *page;
> + u64 *tbl;
>  
>   /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
>   /*  liobn, ioba, tce); */
>  
> - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
> - if (stt->liobn == liobn) {
> - unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
> - struct page *page;
> - u64 *tbl;
> + if (!stt)
> + return ret;
>  
> - /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
> - /*  liobn, stt, stt->window_size); */
> - if (ioba >= stt->window_size)
> - return H_PARAMETER;
> + ret = kvmppc_ioba_validate(stt, ioba, 1);
> + if (ret)
> + return ret;
>  
> - page = stt->pages[idx / TCES_PER_PAGE];
> - tbl = (u64 *)page_address(page);
> + idx = ioba >> SPAPR_TCE_SHIFT;
> + page = stt->pages[idx / TCES_PER_PAGE];
> + tbl = (u64 *)page_address(page);
>  
> - /* FIXME: Need to validate the TCE itself */
> - /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> - tbl[idx % TCES_PER_PAGE] = tce;
> - return H_SUCCESS;
> - }
> - }
> + /* FIXME: Need to validate the TCE itself */
> + /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
> + tbl[idx % TCES_PER_PAGE] = tce;
>  
> - /* Didn't find the liobn, punt it to userspace */
> - return H_TOO_HARD;
> + return ret;

So, this relies on the fact that kvmppc_ioba_validate() would have
returned H_SUCCESS some distance above.  This seems rather fragile if
you insert anything else which alters ret in between.  Since this is
the success path, I think it's clearer to explicitly return H_SUCCESS.
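
[Editor's sketch of the suggested ending, i.e. the success path
returning the status explicitly:]

	tbl[idx % TCES_PER_PAGE] = tce;

	return H_SUCCESS;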

>  }
>  EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
>  
>  long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> unsigned long ioba)
>  {
> - struct kvm *kvm = vcpu->kvm;
> - struct 

[PATCH kernel 3/9] KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers

2015-09-15 Thread Alexey Kardashevskiy
This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have one
exit path. This allows the next patch to add locks nicely.

This moves the ioba boundary check to a helper and adds a check for
the least significant bits, which have to be zero.

The patch is pretty mechanical (only the check for the least
significant ioba bits is added), so no change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy 
---
 arch/powerpc/kvm/book3s_64_vio_hv.c | 102 +++-
 1 file changed, 66 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 89e96b3..8ae12ac 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -35,71 +35,101 @@
 #include 
 #include 
 #include 
+#include <asm/iommu.h>
 
 #define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
 
+/*
+ * Finds a TCE table descriptor by LIOBN.
+ *
+ * WARNING: This will be called in real or virtual mode on HV KVM and virtual
+ *  mode on PR KVM
+ */
+static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+   unsigned long liobn)
+{
+   struct kvm *kvm = vcpu->kvm;
+   struct kvmppc_spapr_tce_table *stt;
+
+   list_for_each_entry_rcu_notrace(stt, &kvm->arch.spapr_tce_tables, list)
+   if (stt->liobn == liobn)
+   return stt;
+
+   return NULL;
+}
+
+/*
+ * Validates IO address.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *  mode on PR KVM
+ */
+static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+   unsigned long ioba, unsigned long npages)
+{
+   unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
+   unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
+   unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
+
+   if ((ioba & mask) || (size + npages <= idx))
+   return H_PARAMETER;
+
+   return H_SUCCESS;
+}
+
 /* WARNING: This will be called in real-mode on HV KVM and virtual
  *  mode on PR KVM
  */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
  unsigned long ioba, unsigned long tce)
 {
-   struct kvm *kvm = vcpu->kvm;
-   struct kvmppc_spapr_tce_table *stt;
+   struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+   long ret = H_TOO_HARD;
+   unsigned long idx;
+   struct page *page;
+   u64 *tbl;
 
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/*  liobn, ioba, tce); */
 
-   list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-   if (stt->liobn == liobn) {
-   unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-   struct page *page;
-   u64 *tbl;
+   if (!stt)
+   return ret;
 
-   /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
-   /*  liobn, stt, stt->window_size); */
-   if (ioba >= stt->window_size)
-   return H_PARAMETER;
+   ret = kvmppc_ioba_validate(stt, ioba, 1);
+   if (ret)
+   return ret;
 
-   page = stt->pages[idx / TCES_PER_PAGE];
-   tbl = (u64 *)page_address(page);
+   idx = ioba >> SPAPR_TCE_SHIFT;
+   page = stt->pages[idx / TCES_PER_PAGE];
+   tbl = (u64 *)page_address(page);
 
-   /* FIXME: Need to validate the TCE itself */
-   /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
-   tbl[idx % TCES_PER_PAGE] = tce;
-   return H_SUCCESS;
-   }
-   }
+   /* FIXME: Need to validate the TCE itself */
+   /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+   tbl[idx % TCES_PER_PAGE] = tce;
 
-   /* Didn't find the liobn, punt it to userspace */
-   return H_TOO_HARD;
+   return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
  unsigned long ioba)
 {
-   struct kvm *kvm = vcpu->kvm;
-   struct kvmppc_spapr_tce_table *stt;
+   struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+   long ret = H_TOO_HARD;
 
-   list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-   if (stt->liobn == liobn) {
+
+   if (stt) {
+   ret = kvmppc_ioba_validate(stt, ioba, 1);
+   if (!ret) {
unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-   struct page *page;
-   u64 *tbl;
-
-   if (ioba >= stt->window_size)
-   return H_PARAMETER;
-
-   page = stt->pages[idx / TCES_PER_PAGE];
-   tbl = (u64 *)page_address(page);
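
[Editor's illustration - a stand-alone, user-space sketch of the
arithmetic behind the new kvmppc_ioba_validate() check as posted in
this version of the patch. The 4K page-shift value and the helper name
ioba_ok are assumptions made for the example only; this is not kernel
code:]

/* Mirrors the posted check: unaligned ioba or an out-of-range index is rejected. */
#include <stdio.h>

#define IOMMU_PAGE_SHIFT_4K 12  /* assumed value of the kernel constant */

static int ioba_ok(unsigned long ioba, unsigned long npages,
		   unsigned long window_size)
{
	unsigned long mask = (1UL << IOMMU_PAGE_SHIFT_4K) - 1;
	unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
	unsigned long size = window_size >> IOMMU_PAGE_SHIFT_4K;

	/* same condition as the patch: low bits clear, and idx < size + npages */
	return !(ioba & mask) && (size + npages > idx);
}

int main(void)
{
	/* 4K-aligned and inside a 1MB window: accepted */
	printf("%d\n", ioba_ok(0x3000, 1, 1UL << 20));
	/* low bits set: would be rejected with H_PARAMETER in the patch */
	printf("%d\n", ioba_ok(0x3080, 1, 1UL << 20));
	return 0;
}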
