Create and destroy mappings aliased to a user-space mapping with the same cache coloring as the userspace mapping. Allow the kernel to load from and store to pages shared with user-space through its own mapping in kernel virtual addresses, while ensuring cache coherency between kernel and userspace mappings for virtually aliased architectures.
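For illustration, a hypothetical caller could pair the two helpers as
follows. This is a minimal sketch, not part of this patch: the function
name example_poke_user_page, and the assumption that the caller has
already pinned the user page (e.g. via get_user_pages), are illustrative
only.

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static int example_poke_user_page(struct page *page,
					  unsigned long uaddr)
	{
		struct page *pages[1] = { page };
		u32 *kaddr;

		/* Alias the pinned user page with matching cache color. */
		kaddr = vm_map_user_ram(pages, 1, uaddr, NUMA_NO_NODE,
					PAGE_KERNEL);
		if (!kaddr)
			return -ENOMEM;

		/*
		 * Kernel loads/stores through kaddr hit the same cache
		 * lines as user-space accesses through uaddr.
		 */
		WRITE_ONCE(*kaddr, 42);

		vm_unmap_user_ram(kaddr, 1);
		return 0;
	}

Because the kernel alias is placed at the same offset modulo SHMLBA as
the user mapping, both virtual addresses index the same cache set on
virtually indexed caches, which is what lets callers on virtually
aliased architectures skip explicit cache flushes between the two views.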
Signed-off-by: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
Reviewed-by: Matthew Wilcox <mawil...@microsoft.com>
CC: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Paul Turner <p...@google.com>
CC: Thomas Gleixner <t...@linutronix.de>
CC: Andy Lutomirski <l...@amacapital.net>
CC: Andi Kleen <a...@firstfloor.org>
CC: Dave Watson <davejwat...@fb.com>
CC: Chris Lameter <c...@linux.com>
CC: Ingo Molnar <mi...@redhat.com>
CC: "H. Peter Anvin" <h...@zytor.com>
CC: Ben Maurer <bmau...@fb.com>
CC: Steven Rostedt <rost...@goodmis.org>
CC: Josh Triplett <j...@joshtriplett.org>
CC: Linus Torvalds <torva...@linux-foundation.org>
CC: Andrew Morton <a...@linux-foundation.org>
CC: Russell King <li...@arm.linux.org.uk>
CC: Catalin Marinas <catalin.mari...@arm.com>
CC: Will Deacon <will.dea...@arm.com>
CC: Michael Kerrisk <mtk.manpa...@gmail.com>
CC: Boqun Feng <boqun.f...@gmail.com>
---
Changes since v1:
- Use WARN_ON() rather than BUG_ON().
---
 include/linux/vmalloc.h |  4 +++
 mm/vmalloc.c            | 66 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 398e9c95cd61..899657b3d469 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,6 +59,10 @@ struct vmap_area {
 extern void vm_unmap_ram(const void *mem, unsigned int count);
 extern void *vm_map_ram(struct page **pages, unsigned int count,
 				int node, pgprot_t prot);
+extern void vm_unmap_user_ram(const void *mem, unsigned int count);
+extern void *vm_map_user_ram(struct page **pages, unsigned int count,
+				unsigned long uaddr, int node, pgprot_t prot);
+
 extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a236bac872f0..8df3c572036c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1188,6 +1188,72 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_unmap_user_ram - unmap linear kernel address space set up by vm_map_user_ram
+ * @mem: the pointer returned by vm_map_user_ram
+ * @count: the count passed to that vm_map_user_ram call (cannot unmap partial)
+ */
+void vm_unmap_user_ram(const void *mem, unsigned int count)
+{
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
+	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
+
+	might_sleep();
+	if (WARN_ON(!addr) ||
+	    WARN_ON(addr < VMALLOC_START) ||
+	    WARN_ON(addr > VMALLOC_END) ||
+	    WARN_ON(!PAGE_ALIGNED(addr)))
+		return;
+
+	debug_check_no_locks_freed(mem, size);
+	va = find_vmap_area(addr);
+	if (WARN_ON(!va))
+		return;
+	free_unmap_vmap_area(va);
+}
+EXPORT_SYMBOL(vm_unmap_user_ram);
+
+/**
+ * vm_map_user_ram - map user space pages linearly into kernel virtual address
+ * @pages: an array of pointers to the virtually contiguous pages to be mapped
+ * @count: number of pages
+ * @uaddr: address within the first page in the userspace mapping
+ * @node: prefer to allocate data structures on this node
+ * @prot: memory protection to use. PAGE_KERNEL for regular RAM
+ *
+ * Create a mapping aliased to a user-space mapping with the same cache
+ * coloring as the userspace mapping. Allow the kernel to load from and
+ * store to pages shared with user-space through its own mapping in kernel
+ * virtual addresses while ensuring cache coherency between kernel and
+ * userspace mappings for virtually aliased architectures.
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
+ */
+void *vm_map_user_ram(struct page **pages, unsigned int count,
+		unsigned long uaddr, int node, pgprot_t prot)
+{
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
+	unsigned long va_offset = ALIGN_DOWN(uaddr, PAGE_SIZE) & (SHMLBA - 1);
+	unsigned long alloc_size = ALIGN(va_offset + size, SHMLBA);
+	struct vmap_area *va;
+	unsigned long addr;
+	void *mem;
+
+	va = alloc_vmap_area(alloc_size, SHMLBA, VMALLOC_START, VMALLOC_END,
+			     node, GFP_KERNEL);
+	if (IS_ERR(va))
+		return NULL;
+	addr = va->va_start + va_offset;
+	mem = (void *)addr;
+	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+		vm_unmap_user_ram(mem, count);
+		return NULL;
+	}
+	return mem;
+}
+EXPORT_SYMBOL(vm_map_user_ram);
+
 static struct vm_struct *vmlist __initdata;
 /**
  * vm_area_add_early - add vmap area early during boot
-- 
2.11.0