[RFC PATCH v1 22/31] ARC: Page Fault handling (incl uaccess fixup)

2012-11-07 Thread Vineet Gupta
This includes the recent core changes that allow the fault handler to
"retry" and/or be "killable" (early exit).

The killable (early-exit) logic is loosely based on how SH implements it:
return if a SIGKILL is pending and the fault returned either VM_FAULT_OOM
or VM_FAULT_RETRY. This differs from the Hexagon implementation, which
would NOT exit early for SIGKILL + VM_FAULT_OOM + !VM_FAULT_RETRY.
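
To make that condition concrete, here is a minimal sketch of the SH-style
early exit, written against the generic 3.7-era fault API; it is
illustrative only, not an excerpt from the (truncated) patch below:

	fault = handle_mm_fault(mm, vma, address, flags);

	/* Fault processing was interrupted by a pending SIGKILL */
	if (fatal_signal_pending(current) &&
	    (fault & (VM_FAULT_OOM | VM_FAULT_RETRY))) {
		/* On VM_FAULT_RETRY, handle_mm_fault() has already
		 * released mmap_sem; on OOM we still hold it.
		 */
		if (!(fault & VM_FAULT_RETRY))
			up_read(&mm->mmap_sem);
		if (user_mode(regs))
			return;	/* SIGKILL is delivered on return */
	}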

Signed-off-by: Vineet Gupta <vgu...@synopsys.com>
---
 arch/arc/mm/extable.c |   26 ++
 arch/arc/mm/fault.c   |  228 +
 2 files changed, 254 insertions(+), 0 deletions(-)
 create mode 100644 arch/arc/mm/extable.c
 create mode 100644 arch/arc/mm/fault.c

diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 0000000..f05b677
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+   const struct exception_table_entry *fixup;
+
+   fixup = search_exception_tables(instruction_pointer(regs));
+   if (fixup) {
+   regs->ret = fixup->fixup;
+
+   return 1;
+   }
+
+   return 0;
+}
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 0000000..af55aab
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,228 @@
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <asm/pgalloc.h>
+
+static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+{
+   /*
+* Synchronize this task's top level page-table
+* with the 'reference' page table.
+*/
+   pgd_t *pgd, *pgd_k;
+   pud_t *pud, *pud_k;
+   pmd_t *pmd, *pmd_k;
+
+   pgd = pgd_offset_fast(mm, address);
+   pgd_k = pgd_offset_k(address);
+
+   if (!pgd_present(*pgd_k))
+   goto bad_area;
+
+   pud = pud_offset(pgd, address);
+   pud_k = pud_offset(pgd_k, address);
+   if (!pud_present(*pud_k))
+   goto bad_area;
+
+   pmd = pmd_offset(pud, address);
+   pmd_k = pmd_offset(pud_k, address);
+   if (!pmd_present(*pmd_k))
+   goto bad_area;
+
+   set_pmd(pmd, *pmd_k);
+
+   /* XXX: create the TLB entry here */
+   return 0;
+
+bad_area:
+   return 1;
+}
+
+void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+  unsigned long cause_code)
+{
+   struct vm_area_struct *vma = NULL;
+   struct task_struct *tsk = current;
+   struct mm_struct *mm = tsk->mm;
+   siginfo_t info;
+   int fault, ret;
+   unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+   (write ? FAULT_FLAG_WRITE : 0);
+
+   /*
+* We fault-in kernel-space virtual memory on-demand. The
+* 'reference' page table is init_mm.pgd.
+*
+* NOTE! We MUST NOT take any locks for this case. We may
+* be in an interrupt or a critical region, and should
+* only copy the information from the master page table,
+* nothing more.
+*/
+   if (address >= VMALLOC_START && address <= VMALLOC_END) {
+   ret = handle_vmalloc_fault(mm, address);
+   if (unlikely(ret))
+   goto bad_area_nosemaphore;
+   else
+   return;
+   }
+
+   info.si_code = SEGV_MAPERR;
+
+   /*
+* If we're in an interrupt or have no user
+* context, we must not take the fault..
+*/
+   if (in_atomic() || !mm)
+   goto no_context;
+
+retry:
+   down_read(&mm->mmap_sem);
+   vma = find_vma(mm, address);
+   if (!vma)
+   goto bad_area;
+   if (vma->vm_start <= address)
+   goto good_area;
+   if (!(vma->vm_flags & VM_GROWSDOWN))
+   goto bad_area;
+   if (expand_stack(vma, address))
+   goto bad_area;
+
+   /*
+* Ok, we have a good vm_area for this memory access, so
+* we can handle it..
+*/
+good_area:
+   info.si_code = SEGV_ACCERR;
+
+   /* Handle protection violation, execute on heap or stack */
+
+   if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+   goto bad_area;
+
+   if (write) {
+   if (!(vma->vm_flags & VM_WRITE))
+   goto bad_area;
+   } else {
+   if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+   goto bad_area;
+   }
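
The archived message is truncated at this point; the remainder of
do_page_fault() would contain the handle_mm_fault() call plus the
retry/killable handling described in the changelog. For reference, a
minimal sketch of the retry path in the generic 3.7-era style -- an
assumption about the missing code, not a quote of it:

	fault = handle_mm_fault(mm, vma, address, flags);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_RETRY) {
				/* handle_mm_fault() already dropped
				 * mmap_sem; allow only one retry.
				 */
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				goto retry;
			}
		}
		up_read(&mm->mmap_sem);
		return;
	}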
