[PATCH] cxl: Fix issues when unmapping contexts

2015-01-06 Thread Ian Munsie
From: Ian Munsie <imun...@au1.ibm.com>

An issue was introduced with "cxl: Unmap MMIO regions when detaching a
context" (b123429e6a9e8d03aacf888d23262835f0081448) where closing a
context normally could also unmap the problem state area of other
contexts currently using the AFU.

It was also discovered that after a context's MMIO space had been
unmapped, accesses to it would read 0s, whereas the expected behaviour
was for the access to fail altogether.

In order to address these issues, this patch does two things:

- Forced mmap unmapping is only done when we are forcefully detaching
  all contexts, and not in the normal detach path. Since the normal
  context close path is tied to the file release, any mmaps must already
  have been released, so we don't need to worry in that case.

- The mmap path now uses a vm_operations_struct with a fault handler.
  The fault handler ensures that the context is in the started state,
  otherwise it fails the access attempt with SIGBUS (see the userspace
  sketch below).
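
For illustration only (not part of the patch): a minimal userspace sketch of
how the change is observable. It assumes the AFU itself is enabled but this
particular context has not been started (the cxl ioctls are not shown), and
uses a hypothetical /dev/cxl/afu0.0s device node. Before this patch a load
from such a mapping could silently return 0s; with the fault handler in place
it raises SIGBUS instead.

#include <fcntl.h>
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf bus_jmp;

static void sigbus_handler(int sig)
{
	(void)sig;
	siglongjmp(bus_jmp, 1);		/* unwind out of the faulting load */
}

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);

	/* Hypothetical AFU device node; the real name depends on the card */
	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Map the per-context problem state area (one page for the demo) */
	volatile uint64_t *psa = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
				      MAP_SHARED, fd, 0);
	if (psa == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	signal(SIGBUS, sigbus_handler);

	if (sigsetjmp(bus_jmp, 1) == 0) {
		/* Context not started: with this patch the access faults */
		printf("read 0x%llx\n", (unsigned long long)psa[0]);
	} else {
		printf("access faulted with SIGBUS as expected\n");
	}

	munmap((void *)psa, pgsz);
	close(fd);
	return 0;
}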

Signed-off-by: Ian Munsie <imun...@au1.ibm.com>
---
 drivers/misc/cxl/context.c | 82 +++---
 drivers/misc/cxl/file.c| 14 
 2 files changed, 71 insertions(+), 25 deletions(-)

diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b5..d1b55fe 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+   struct cxl_context *ctx = vma->vm_file->private_data;
+   unsigned long address = (unsigned long)vmf->virtual_address;
+   u64 area, offset;
+
+   offset = vmf->pgoff << PAGE_SHIFT;
+
+   pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+   __func__, ctx->pe, address, offset);
+
+   if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+   area = ctx->afu->psn_phys;
+   if (offset > ctx->afu->adapter->ps_size)
+   return VM_FAULT_SIGBUS;
+   } else {
+   area = ctx->psn_phys;
+   if (offset > ctx->psn_size)
+   return VM_FAULT_SIGBUS;
+   }
+
+   mutex_lock(&ctx->status_mutex);
+
+   if (ctx->status != STARTED) {
+   mutex_unlock(&ctx->status_mutex);
+   pr_devel("%s: Context not started, failing problem state access\n",
+   __func__);
+   return VM_FAULT_SIGBUS;
+   }
+
+   vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+   mutex_unlock(&ctx->status_mutex);
+
+   return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+   .fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
 
-   if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-   vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-   return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-   }
+   if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+   /* make sure there is a valid per process space for this AFU */
+   if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+   pr_devel("AFU doesn't support mmio space\n");
+   return -EINVAL;
+   }
 
-   /* make sure there is a valid per process space for this AFU */
-   if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-   pr_devel("AFU doesn't support mmio space\n");
-   return -EINVAL;
+   /* Can't mmap until the AFU is enabled */
+   if (!ctx->afu->enabled)
+   return -EBUSY;
}
 
-   /* Can't mmap until the AFU is enabled */
-   if (!ctx->afu->enabled)
-   return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 ctx->psn_phys, ctx->pe , ctx->master);
 
+   vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-   return vm_iomap_memory(vma, ctx->psn_phys, len);
+   vma->vm_ops = &cxl_mmap_vmops;
+   return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
-   /* Release Problem State Area mapping */
-   mutex_lock(&ctx->mapping_lock);
-   if (ctx->mapping)
-   unmap_mapping_range(ctx->mapping, 0, 0, 1);
-   mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 * created and torn down after the IDR removed
