This is a patch that adds Memory Protection Unit support for no-MMU ARM.
The default MPU configuration on most ARM7-based cores only allows MPU
control of the first 1MB of address space. However, this can easily be
changed; on our core the MPU controls the first 1GB of address space,
and the space above that is accessible only in privileged kernel mode.
With this, the MPU becomes a useful means of restricting userland
access to kernel space in uClinux.
This is the first in a set of three patches that provide a way to
completely isolate userland from read/write access to kernel memory space.
This first patch provides the base MPU framework.
The platform-level code must implement a 'struct mpu' object that
describes the MPU configuration of the core and how to enable and
disable MPU regions on it. A working example implementation of
'struct mpu' is provided in arch/arm/mm/sc100.c.
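
For illustration, here is a minimal sketch of how platform code might
register its MPU with the framework at boot. The init hook name below is
made up; mpu_init(), mpu_enable() and sc100_mpu are the interfaces added
by this patch, and background regions (described further down) would be
added between the two calls:

	#include <linux/init.h>
	#include <asm/mpu.h>

	extern struct mpu sc100_mpu;	/* defined in arch/arm/mm/sc100.c */

	/* hypothetical platform init hook, not part of this patch */
	static int __init sc100_mpu_setup(void)
	{
		int ret;

		/* hand the core's MPU description to the generic layer */
		ret = mpu_init(&sc100_mpu);
		if (ret)
			return ret;

		/* ... add and enable background regions here ... */

		/* turn the MPU on once the background regions are in place */
		return mpu_enable();
	}
	arch_initcall(sc100_mpu_setup);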
A process keeps track of its MPU regions with 'struct mpu_region'
objects attached to its VMAs. During a context switch, the old
process's mpu_regions are disabled and the new process's mpu_regions
are enabled, similar to how process page tables are switched. See
switch_mm() for the no-MMU case.
The next patch will implement a private user memory pool that processes
allocate from, instead of allocating from kernel memory with kmalloc. It
uses the general purpose allocator library (genalloc.c). In combination
with the MPU support, userland by default has access only to this
private pool of memory.
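
As a rough sketch of what that will look like (the pool variable, base
address and size below are placeholders, not taken from the follow-on
patch; gen_pool_create(), gen_pool_add() and gen_pool_alloc() are the
standard genalloc.c interfaces):

	#include <linux/init.h>
	#include <linux/mm.h>
	#include <linux/genalloc.h>

	static struct gen_pool *user_pool;	/* placeholder pool object */

	static int __init user_pool_init(void)
	{
		/* page-granular pool; -1 means no NUMA node preference */
		user_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!user_pool)
			return -ENOMEM;

		/* base address and size are made-up examples */
		return gen_pool_add(user_pool, 0x20000000, 8 * 1024 * 1024, -1);
	}

	/* user allocations then come from the pool instead of kmalloc() */
	static unsigned long user_alloc(size_t len)
	{
		return gen_pool_alloc(user_pool, len);
	}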
The platform-level code will normally initialize two "background" MPU
regions that are always active. The first gives kernel read/write access
to all of memory space. The second gives user-level read/write access to
the user memory pool.
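
A sketch of how a platform might create those two background regions
with the interfaces in this patch (the addresses are placeholders and
must stay within the platform's max_addr; passing a NULL mm to
mpu_add_region() allocates one of the background priorities):

	#include <linux/init.h>
	#include <asm/mpu.h>

	static struct mpu_region kernel_bg = {
		.base		= 0x00000000,
		.limit		= 0x000fffff,	/* placeholder: all MPU-visible memory */
		.priv_access	= MPU_ACCESS_RW,
		.user_access	= MPU_ACCESS_NONE,
	};

	static struct mpu_region user_bg = {
		.base		= 0x00080000,	/* placeholder: the user memory pool */
		.limit		= 0x000fffff,
		.priv_access	= MPU_ACCESS_RW,
		.user_access	= MPU_ACCESS_RW,
	};

	static int __init setup_background_regions(void)
	{
		int ret;

		/* mm == NULL: take a priority from the background range */
		ret = mpu_add_region(NULL, &kernel_bg);
		if (!ret)
			ret = mpu_enable_region(&kernel_bg);
		if (!ret)
			ret = mpu_add_region(NULL, &user_bg);
		if (!ret)
			ret = mpu_enable_region(&user_bg);
		return ret;
	}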
MPU regions attached to process VMAs come from mmap() requests for XIP
private file mappings. Note that non-XIP file mappings allocate the
memory backing the mapping from the user memory pool, so the
user-memory background MPU region already covers them and there is no
need to create a new mpu_region for such mappings.
This patch is against the uclinux-888 release kernel.
Steve
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/arch/arm/mm/Makefile linux-2.6.x/arch/arm/mm/Makefile
--- linux-2.6.x.orig/arch/arm/mm/Makefile 2010-08-22 15:22:54.391149924 -0700
+++ linux-2.6.x/arch/arm/mm/Makefile 2010-08-22 15:43:33.761212277 -0700
@@ -9,6 +9,7 @@
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o consistent-nommu.o
+obj-$(CONFIG_CPU_CP15_MPU) += mpu.o
endif
obj-$(CONFIG_MODULES) += proc-syms.o
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/arch/arm/mm/mpu.c linux-2.6.x/arch/arm/mm/mpu.c
--- linux-2.6.x.orig/arch/arm/mm/mpu.c 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.x/arch/arm/mm/mpu.c 2010-08-22 15:29:28.916058187 -0700
@@ -0,0 +1,197 @@
+/*
+ * linux/arch/arm/mm/mpu.c
+ *
+ * Support for no-MMU processors that contain a Memory Protection Unit (MPU).
+ *
+ * Copyright (C) 2009 Netspectrum Communication Systems, Inc.
+ * Copyright (C) 2009 Silicon Storage Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mm.h>
+#include <asm/mpu.h>
+
+static struct mpu * mpu = NULL;
+
+static const char * access_str[] = {
+ [MPU_ACCESS_NONE] = "no access",
+ [MPU_ACCESS_RO] = "r/o",
+ [MPU_ACCESS_RW] = "r/w",
+};
+
+/*
+ * Static background regions are allocated starting at the lowest priority. The
+ * first per-process region requested fixes highest_bg_prio, and no further
+ * background region requests are allowed after that.
+ */
+static int highest_bg_prio = 0;
+static int bg_prio_fixed = 0;
+
+static inline int find_free_prio(struct mm_struct * mm)
+{
+ struct vm_list_struct *vml;
+ int prio, inuse;
+ int ret = -ENOSPC;
+
+ for (prio = highest_bg_prio; prio < mpu->num_prio; prio++) {
+ inuse = 0;
+ for (vml = mm->context.vmlist; vml; vml = vml->next) {
+ if (!vml->vma || !vml->vma->vm_mpu)
+ continue;
+ if (prio == vml->vma->vm_mpu->prio) {
+ inuse = 1;
+ break;
+ }
+ }
+ if (!inuse) {
+ ret = prio;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static inline int alloc_prio(struct mm_struct * mm)
+{
+ int prio;
+
+ if (!mm) {
+ if (bg_prio_fixed)
+ return -EINVAL;
+ if (highest_bg_prio >= mpu->num_prio - 1)
+ return -ENOSPC;
+ prio = highest_bg_prio++;
+ } else {
+ bg_prio_fixed = 1;
+ prio = find_free_prio(mm);
+ }
+
+ return prio;
+}
+
+/*
+ * The mm->mmap_sem should be held at least for read when mm != NULL.
+ */
+int mpu_add_region(struct mm_struct * mm, struct mpu_region * region)
+{
+ int ret;
+
+ if (!mpu || !mpu->add_region)
+ return -ENODEV;
+ if (!region ||
+ region->base > mpu->max_addr ||
+ region->limit > mpu->max_addr ||
+ region->base > region->limit)
+ return -EINVAL;
+
+ ret = alloc_prio(mm);
+ if (ret < 0)
+ return ret;
+ region->prio = ret;
+
+ /*
+ * make sure region base/limit are aligned according to MPU h/w
+ * requirement.
+ */
+ region->base &= ~(mpu->region_align - 1);
+ region->limit &= ~(mpu->region_align - 1);
+
+ /* add the region in h/w */
+ return mpu->add_region(region);
+}
+
+int mpu_del_region(struct mm_struct * mm, struct mpu_region * region)
+{
+ if (!mpu || !mpu->del_region)
+ return -ENODEV;
+ if (!region ||
+ region->prio >= mpu->num_prio ||
+ region->base > mpu->max_addr ||
+ region->limit > mpu->max_addr ||
+ region->base > region->limit)
+ return -EINVAL;
+
+ /* remove the region in h/w */
+ return mpu->del_region(region);
+}
+
+int mpu_enable_region(struct mpu_region * region)
+{
+ if (!mpu || !mpu->enable_region)
+ return -ENODEV;
+ if (!region ||
+ region->prio >= mpu->num_prio ||
+ region->base > mpu->max_addr ||
+ region->limit > mpu->max_addr ||
+ region->base > region->limit)
+ return -EINVAL;
+
+ /* enable the region in h/w */
+ return mpu->enable_region(region);
+}
+
+int mpu_disable_region(struct mpu_region * region)
+{
+ if (!mpu || !mpu->disable_region)
+ return -ENODEV;
+ if (!region ||
+ region->prio >= mpu->num_prio ||
+ region->base > mpu->max_addr ||
+ region->limit > mpu->max_addr ||
+ region->base > region->limit)
+ return -EINVAL;
+
+ /* disable the region in h/w */
+ return mpu->disable_region(region);
+}
+
+int mpu_print_region(struct mpu_region * region)
+{
+ struct mpu_region r = *region;
+
+ if (!mpu || !mpu->read_region)
+ return -ENODEV;
+ if (!region)
+ return -EINVAL;
+
+ mpu->read_region(&r);
+
+ printk(KERN_INFO "MPU region %d: base %08lx, limit %08lx, "
+ "priv %s user %s\n", r.prio, r.base, r.limit,
+ access_str[r.priv_access], access_str[r.user_access]);
+ return 0;
+}
+
+int mpu_enable(void)
+{
+ if (!mpu || !mpu->enable_mpu)
+ return -ENODEV;
+
+ /* enable the MPU in h/w */
+ mpu->enable_mpu();
+ return 0;
+}
+
+int mpu_disable(void)
+{
+ if (!mpu || !mpu->disable_mpu)
+ return -ENODEV;
+
+ /* disable the MPU in h/w */
+ mpu->disable_mpu();
+ return 0;
+}
+
+int mpu_init(struct mpu * the_mpu)
+{
+ mpu = the_mpu;
+ mpu_disable();
+ return 0;
+}
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/arch/arm/mm/sc100.c linux-2.6.x/arch/arm/mm/sc100.c
--- linux-2.6.x.orig/arch/arm/mm/sc100.c 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.x/arch/arm/mm/sc100.c 2010-08-22 15:29:28.919149487 -0700
@@ -0,0 +1,336 @@
+/*
+ * linux/arch/arm/mm/sc100.c
+ *
+ * SC100-specific processor support (MPU).
+ *
+ * Copyright (C) 2009 Netspectrum Communication Systems, Inc.
+ * Copyright (C) 2009 Silicon Storage Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/sc100.h>
+#include <asm/setup.h>
+#include <asm/system.h>
+
+#define SC100_MPU_BLOCK 64
+#define SC100_MPU_NUM_REGIONS 8
+#define SC100_MPU_LIMIT (1024 * 1024)
+
+enum sc100_region_access {
+ NO_ACCESS = 0,
+ PRIV_RW_USER_NO_ACCESS,
+ PRIV_RW_USER_RO,
+ PRIV_RW_USER_RW,
+};
+
+void sc100_alignment_faults_enable(void)
+{
+ /* enable alignment faults in CP15 reg 1 */
+ u32 value;
+ __asm__ __volatile__("mrc p15, 0, %0, c1, c0, 0\n": "=r"(value));
+ value |= (1 << 1);
+ __asm__ __volatile__("mcr p15, 0, %0, c1, c0, 0\n": :"r"(value));
+}
+
+static void sc100_mpu_enable(void)
+{
+ /* enable MPU in CP15 reg 1 */
+ u32 value;
+ __asm__ __volatile__("mrc p15, 0, %0, c1, c0, 0\n": "=r"(value));
+ value |= (1 << 0);
+ __asm__ __volatile__("mcr p15, 0, %0, c1, c0, 0\n": :"r"(value));
+}
+
+static void sc100_mpu_disable(void)
+{
+ /* disable MPU in CP15 reg 1 */
+ u32 value;
+ __asm__ __volatile__("mrc p15, 0, %0, c1, c0, 0\n": "=r"(value));
+ value &= ~(1 << 0);
+ __asm__ __volatile__("mcr p15, 0, %0, c1, c0, 0\n": :"r"(value));
+}
+
+/*
+ * set MPU base for region n in CP15 reg 6.
+ * TODO: is there a more elegant way to do this? How can I specify a variable
+ * CRm in inline assembly?
+ */
+static inline void set_mpu_base(int n, u32 value, int enable)
+{
+ if (enable)
+ value |= (1 << 0); /* enable this region */
+ else
+ value &= ~(1 << 0); /* disable this region */
+
+ switch (n) {
+ case 0:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c0, 0": :"r"(value));
+ break;
+ case 1:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c1, 0": :"r"(value));
+ break;
+ case 2:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c2, 0": :"r"(value));
+ break;
+ case 3:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c3, 0": :"r"(value));
+ break;
+ case 4:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c4, 0": :"r"(value));
+ break;
+ case 5:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c5, 0": :"r"(value));
+ break;
+ case 6:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c6, 0": :"r"(value));
+ break;
+ case 7:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c7, 0": :"r"(value));
+ break;
+ }
+}
+
+/*
+ * read MPU base for region n in CP15 reg 6.
+ * TODO: is there a more elegant way to do this? How can I specify a variable
+ * CRm in inline assembly?
+ */
+static inline u32 get_mpu_base(int n)
+{
+ u32 ret = 0;
+
+ switch (n) {
+ case 0:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0": "=r"(ret));
+ break;
+ case 1:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c1, 0": "=r"(ret));
+ break;
+ case 2:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c2, 0": "=r"(ret));
+ break;
+ case 3:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c3, 0": "=r"(ret));
+ break;
+ case 4:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c4, 0": "=r"(ret));
+ break;
+ case 5:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c5, 0": "=r"(ret));
+ break;
+ case 6:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c6, 0": "=r"(ret));
+ break;
+ case 7:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c7, 0": "=r"(ret));
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * set MPU limit for region n in CP15 reg 6.
+ * TODO: is there a more elegant way to do this? How can I specify a variable
+ * CRm in inline assembly?
+ */
+static inline void set_mpu_limit(int n, u32 value)
+{
+ switch (n) {
+ case 0:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c8, 0": :"r"(value));
+ break;
+ case 1:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c9, 0": :"r"(value));
+ break;
+ case 2:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c10, 0": :"r"(value));
+ break;
+ case 3:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c11, 0": :"r"(value));
+ break;
+ case 4:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c12, 0": :"r"(value));
+ break;
+ case 5:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c13, 0": :"r"(value));
+ break;
+ case 6:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c14, 0": :"r"(value));
+ break;
+ case 7:
+ __asm__ __volatile__("mcr p15, 0, %0, c6, c15, 0": :"r"(value));
+ break;
+ }
+}
+
+/*
+ * read MPU limit for region n in CP15 reg 6.
+ * TODO: is there a more elegant way to do this? How can I specify a variable
+ * CRm in inline assembly?
+ */
+static inline u32 get_mpu_limit(int n)
+{
+ u32 ret = 0;
+
+ switch (n) {
+ case 0:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c8, 0": "=r"(ret));
+ break;
+ case 1:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c9, 0": "=r"(ret));
+ break;
+ case 2:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c10, 0": "=r"(ret));
+ break;
+ case 3:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c11, 0": "=r"(ret));
+ break;
+ case 4:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c12, 0": "=r"(ret));
+ break;
+ case 5:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c13, 0": "=r"(ret));
+ break;
+ case 6:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c14, 0": "=r"(ret));
+ break;
+ case 7:
+ __asm__ __volatile__("mrc p15, 0, %0, c6, c15, 0": "=r"(ret));
+ break;
+ }
+
+ return ret;
+}
+
+static inline enum sc100_region_access
+to_sc100_access(struct mpu_region * region)
+{
+ if (region->priv_access == MPU_ACCESS_NONE) {
+ return NO_ACCESS;
+ } else {
+ switch (region->user_access) {
+ case MPU_ACCESS_NONE:
+ return PRIV_RW_USER_NO_ACCESS;
+ case MPU_ACCESS_RO:
+ return PRIV_RW_USER_RO;
+ case MPU_ACCESS_RW:
+ return PRIV_RW_USER_RW;
+ }
+ }
+
+ return NO_ACCESS;
+}
+
+/* set access type in CP15 reg 5 */
+static inline void set_mpu_access(int n, enum sc100_region_access access)
+{
+ u32 value;
+ __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0\n": "=r"(value));
+ value &= ~(0x3 << (2 * n));
+ value |= (access << (2 * n));
+ __asm__ __volatile__("mcr p15, 0, %0, c5, c0, 0\n": :"r"(value));
+}
+
+/* read access type in CP15 reg 5 */
+static inline enum sc100_region_access get_mpu_access(int n)
+{
+ u32 value;
+ __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0\n": "=r"(value));
+ value >>= (2 * n);
+ value &= 0x3;
+ return (enum sc100_region_access)value;
+}
+
+static inline enum mpu_region_access get_mpu_priv_access(int n)
+{
+ enum sc100_region_access access = get_mpu_access(n);
+ switch (access) {
+ case NO_ACCESS:
+ return MPU_ACCESS_NONE;
+ case PRIV_RW_USER_NO_ACCESS:
+ case PRIV_RW_USER_RO:
+ case PRIV_RW_USER_RW:
+ return MPU_ACCESS_RW;
+ }
+ return MPU_ACCESS_NONE;
+}
+
+static inline enum mpu_region_access get_mpu_user_access(int n)
+{
+ enum sc100_region_access access = get_mpu_access(n);
+ switch (access) {
+ case NO_ACCESS:
+ case PRIV_RW_USER_NO_ACCESS:
+ return MPU_ACCESS_NONE;
+ case PRIV_RW_USER_RO:
+ return MPU_ACCESS_RO;
+ case PRIV_RW_USER_RW:
+ return MPU_ACCESS_RW;
+ }
+ return MPU_ACCESS_NONE;
+}
+
+static int sc100_mpu_add_region(struct mpu_region * region)
+{
+ set_mpu_base(region->prio, region->base, 0);
+ set_mpu_limit(region->prio, region->limit);
+ set_mpu_access(region->prio, to_sc100_access(region));
+ return 0;
+}
+
+static int sc100_mpu_del_region(struct mpu_region * region)
+{
+ set_mpu_base(region->prio, 0, 0);
+ set_mpu_limit(region->prio, 0);
+ set_mpu_access(region->prio, NO_ACCESS);
+ return 0;
+}
+
+static int sc100_mpu_enable_region(struct mpu_region * region)
+{
+ set_mpu_base(region->prio, region->base, 0);
+ set_mpu_limit(region->prio, region->limit);
+ set_mpu_access(region->prio, to_sc100_access(region));
+ set_mpu_base(region->prio, region->base, 1);
+ return 0;
+}
+
+static int sc100_mpu_disable_region(struct mpu_region * region)
+{
+ set_mpu_base(region->prio, region->base, 0);
+ return 0;
+}
+
+static int sc100_mpu_read_region(struct mpu_region * region)
+{
+ region->base = get_mpu_base(region->prio);
+ region->limit = get_mpu_limit(region->prio);
+ region->priv_access = get_mpu_priv_access(region->prio);
+ region->user_access = get_mpu_user_access(region->prio);
+ return 0;
+}
+
+struct mpu sc100_mpu = {
+ .num_prio = SC100_MPU_NUM_REGIONS,
+ .max_addr = SC100_MPU_LIMIT,
+ .region_align = SC100_MPU_BLOCK,
+
+ .enable_mpu = sc100_mpu_enable,
+ .disable_mpu = sc100_mpu_disable,
+ .add_region = sc100_mpu_add_region,
+ .del_region = sc100_mpu_del_region,
+ .enable_region = sc100_mpu_enable_region,
+ .disable_region = sc100_mpu_disable_region,
+ .read_region = sc100_mpu_read_region,
+};
+
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/include/asm-arm/mmu_context.h linux-2.6.x/include/asm-arm/mmu_context.h
--- linux-2.6.x.orig/include/asm-arm/mmu_context.h 2010-08-22 15:22:54.034149760 -0700
+++ linux-2.6.x/include/asm-arm/mmu_context.h 2010-08-22 15:29:28.923149519 -0700
@@ -16,6 +16,9 @@
#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
+#ifndef CONFIG_MMU
+#include <asm/mpu.h>
+#endif
#include <asm-generic/mm_hooks.h>
void __check_kvm_seq(struct mm_struct *mm);
@@ -92,11 +95,11 @@
* calling the CPU specific function when the mm hasn't
* actually changed.
*/
+#ifdef CONFIG_MMU
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
-#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
@@ -105,8 +108,33 @@
if (cache_is_vivt())
cpu_clear(cpu, prev->cpu_vm_mask);
}
-#endif
}
+#else
+/* should hold mmap_sem at least for read before calling */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ struct vm_list_struct *vml;
+
+ if (prev != next) {
+ if (prev) {
+ for (vml = prev->context.vmlist; vml; vml = vml->next) {
+ if (!vml->vma || !vml->vma->vm_mpu)
+ continue;
+ mpu_disable_region(vml->vma->vm_mpu);
+ }
+ }
+ if (next) {
+ for (vml = next->context.vmlist; vml; vml = vml->next) {
+ if (!vml->vma || !vml->vma->vm_mpu)
+ continue;
+ mpu_enable_region(vml->vma->vm_mpu);
+ }
+ }
+ }
+}
+#endif /* CONFIG_MMU */
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/include/asm-arm/mmu.h linux-2.6.x/include/asm-arm/mmu.h
--- linux-2.6.x.orig/include/asm-arm/mmu.h 2010-08-22 15:22:54.035149994 -0700
+++ linux-2.6.x/include/asm-arm/mmu.h 2010-08-22 15:29:28.926149130 -0700
@@ -18,6 +18,8 @@
#else
+#include <asm/mpu.h>
+
/*
* From nommu.h:
* Copyright (C) 2002, David McCullough <dav...@snapgear.com>
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/include/asm-arm/mpu.h linux-2.6.x/include/asm-arm/mpu.h
--- linux-2.6.x.orig/include/asm-arm/mpu.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.x/include/asm-arm/mpu.h 2010-08-22 15:29:28.926149130 -0700
@@ -0,0 +1,101 @@
+/*
+ * linux/include/asm-arm/mpu.h
+ *
+ * MPU support on no-MMU processors.
+ *
+ * Copyright (C) 2009 Netspectrum Communication Systems, Inc.
+ * Copyright (C) 2009 Silicon Storage Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_MPU_H
+#define __ASM_ARM_MPU_H
+
+enum mpu_region_access {
+ MPU_ACCESS_NONE = 0,
+ MPU_ACCESS_RO,
+ MPU_ACCESS_RW,
+};
+
+struct mpu_region;
+
+struct mpu {
+ int num_prio;
+ unsigned long max_addr;
+ unsigned long region_align;
+
+ void (*enable_mpu)(void);
+ void (*disable_mpu)(void);
+ int (*add_region)(struct mpu_region * region);
+ int (*del_region)(struct mpu_region * region);
+ int (*enable_region)(struct mpu_region * region);
+ int (*disable_region)(struct mpu_region * region);
+ int (*read_region)(struct mpu_region * region);
+};
+
+struct mpu_region {
+ int prio;
+ unsigned long base;
+ unsigned long limit;
+ enum mpu_region_access priv_access;
+ enum mpu_region_access user_access;
+};
+
+#ifdef CONFIG_CPU_CP15_MPU
+
+extern int mpu_add_region(struct mm_struct * mm,
+ struct mpu_region * region);
+extern int mpu_del_region(struct mm_struct * mm,
+ struct mpu_region * region);
+extern int mpu_enable_region(struct mpu_region * region);
+extern int mpu_disable_region(struct mpu_region * region);
+extern int mpu_print_region(struct mpu_region * region);
+
+extern int mpu_enable(void);
+extern int mpu_disable(void);
+extern int mpu_init(struct mpu * mpu);
+
+#else /* CONFIG_CPU_CP15_MPU */
+
+static inline int mpu_add_region(struct mm_struct * mm,
+ struct mpu_region * region)
+{
+ return 0;
+}
+static inline int mpu_del_region(struct mm_struct * mm,
+ struct mpu_region * region)
+{
+ return 0;
+}
+static inline int mpu_enable_region(struct mpu_region * region)
+{
+ return 0;
+}
+static inline int mpu_disable_region(struct mpu_region * region)
+{
+ return 0;
+}
+static inline int mpu_print_region(struct mpu_region * region)
+{
+ return 0;
+}
+
+static inline int mpu_enable(void)
+{
+ return 0;
+}
+static inline int mpu_disable(void)
+{
+ return 0;
+}
+static inline int mpu_init(struct mpu * mpu)
+{
+ return 0;
+}
+
+#endif /* CONFIG_CPU_CP15_MPU */
+
+#endif
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/include/linux/mm_types.h linux-2.6.x/include/linux/mm_types.h
--- linux-2.6.x.orig/include/linux/mm_types.h 2010-08-22 15:22:53.694149349 -0700
+++ linux-2.6.x/include/linux/mm_types.h 2010-08-22 15:29:28.927150212 -0700
@@ -12,6 +12,9 @@
#include <linux/completion.h>
#include <asm/page.h>
#include <asm/mmu.h>
+#ifndef CONFIG_MMU
+#include <asm/mpu.h>
+#endif
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
@@ -150,6 +153,7 @@
#ifndef CONFIG_MMU
atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */
+ struct mpu_region * vm_mpu;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
diff -Nuar -X /home/stevel/dontdiff linux-2.6.x.orig/mm/nommu.c linux-2.6.x/mm/nommu.c
--- linux-2.6.x.orig/mm/nommu.c 2010-08-22 15:22:54.081149164 -0700
+++ linux-2.6.x/mm/nommu.c 2010-08-22 15:51:51.910181083 -0700
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/mpu.h>
void *high_memory;
struct page *mem_map;
@@ -105,7 +106,11 @@
{
struct page *page;
- if (!objp || !((page = virt_to_page(objp))))
+ /*
+ * If the object we have should not have ksize performed on it,
+ * return size of 0
+ */
+ if (!objp || objp >= high_memory || !((page = virt_to_page(objp))))
return 0;
if (PageSlab(page))
@@ -740,6 +745,44 @@
return vm_flags;
}
+#ifdef CONFIG_CPU_CP15_MPU
+/*
+ * setup MPU access for direct file mappings
+ */
+static int add_mpu_region(struct vm_area_struct *vma)
+{
+ struct mpu_region region;
+ int ret;
+
+ region.base = vma->vm_start;
+ region.limit = vma->vm_end - 1;
+ region.user_access =
+ (vma->vm_flags & VM_WRITE) ? MPU_ACCESS_RW : MPU_ACCESS_RO;
+ region.priv_access = MPU_ACCESS_RW;
+ ret = mpu_add_region(vma->vm_mm, &region);
+ if (ret) {
+ return (ret == -ENODEV) ? 0 : ret;
+ }
+
+ ret = mpu_enable_region(&region);
+ if (ret) {
+ mpu_del_region(vma->vm_mm, &region);
+ return ret;
+ }
+
+ vma->vm_mpu = kzalloc(sizeof(struct mpu_region), GFP_KERNEL);
+ if (!vma->vm_mpu)
+ return -ENOMEM;
+ *vma->vm_mpu = region;
+ return 0;
+}
+#else /* CONFIG_CPU_CP15_MPU */
+static inline int add_mpu_region(struct vm_area_struct *vma)
+{
+ return 0;
+}
+#endif /* CONFIG_CPU_CP15_MPU */
+
/*
* set up a shared mapping on a file
*/
@@ -748,9 +791,12 @@
int ret;
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
- if (ret != -ENOSYS)
+ if (ret != -ENOSYS) {
+ if (ret == 0)
+ ret = add_mpu_region(vma);
return ret;
-
+ }
+
/* getting an ENOSYS error indicates that direct mmap isn't
* possible (as opposed to tried but failed) so we'll fall
* through to making a private copy of the data and mapping
@@ -775,6 +821,8 @@
if (ret != -ENOSYS) {
/* shouldn't return success if we're not sharing */
BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
+ if (ret == 0)
+ ret = add_mpu_region(vma);
return ret; /* success or a real error */
}
@@ -796,9 +844,9 @@
vma->vm_flags |= VM_MAPPED_COPY;
#ifdef WARN_ON_SLACK
- if (len + WARN_ON_SLACK <= kobjsize(result))
+ if (len + WARN_ON_SLACK <= kobjsize(base))
printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
- len, current->pid, kobjsize(result) - len);
+ len, current->pid, kobjsize(base) - len);
#endif
if (vma->vm_file) {
@@ -964,6 +1012,7 @@
atomic_set(&vma->vm_usage, 1);
if (file)
get_file(file);
+ vma->vm_mm = current->mm;
vma->vm_file = file;
vma->vm_flags = vm_flags;
vma->vm_start = addr;
@@ -1030,6 +1079,10 @@
if (vma) {
if (vma->vm_file)
fput(vma->vm_file);
+ if (vma->vm_mpu) {
+ mpu_del_region(current->mm, vma->vm_mpu);
+ kfree(vma->vm_mpu);
+ }
kfree(vma);
}
return ret;
@@ -1083,6 +1136,12 @@
if (vma->vm_file)
fput(vma->vm_file);
+
+ if (vma->vm_mpu) {
+ mpu_del_region(mm, vma->vm_mpu);
+ kfree(vma->vm_mpu);
+ }
+
kfree(vma);
}