Create an x86_64-specific version of memset() for user space, based on
clear_user().
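
For illustration only (not part of this patch): the calling convention
mirrors clear_user(), returning the number of bytes that could NOT be
set (0 on success), so a caller (uaddr and len are placeholders here)
would check for a nonzero result:

	if (memset_user(uaddr, 0x5a, len))	/* 0 on success */
		return -EFAULT;	/* faulted: range only partially set */
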
This will be used to implement wr_memset() in the __wr_after_init
scenario, where write-rare variables have an alternate mapping that is
used for writing.
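
To show where this fits, here is a sketch only, not the actual
implementation (wr_memset() presumably lands in a later patch of this
series; __wr_map()/__wr_unmap() below are hypothetical stand-ins for
whatever sets up and tears down the alternate mapping):

	static void *wr_memset(void *p, int c, size_t len)
	{
		/* hypothetical: writable alias of p in the user range */
		void __user *alt = __wr_map(p, len);

		/* nonzero return means some bytes could not be written */
		WARN_ON_ONCE(memset_user(alt, c, len));
		__wr_unmap(alt, len);	/* hypothetical helper */
		return p;
	}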

Signed-off-by: Igor Stoppa <igor.sto...@huawei.com>

CC: Andy Lutomirski <l...@amacapital.net>
CC: Nadav Amit <nadav.a...@gmail.com>
CC: Matthew Wilcox <wi...@infradead.org>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Kees Cook <keesc...@chromium.org>
CC: Dave Hansen <dave.han...@linux.intel.com>
CC: Mimi Zohar <zo...@linux.vnet.ibm.com>
CC: Thiago Jung Bauermann <bauer...@linux.ibm.com>
CC: Ahmed Soliman <ahmedsoli...@mena.vt.edu>
CC: linux-integr...@vger.kernel.org
CC: kernel-harden...@lists.openwall.com
CC: linux...@kvack.org
CC: linux-kernel@vger.kernel.org
---
 arch/x86/include/asm/uaccess_64.h |  6 ++++
 arch/x86/lib/usercopy_64.c        | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+)

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..f194bfce4866 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -213,4 +213,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len);
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
+unsigned long __must_check
+memset_user(void __user *mem, int c, unsigned long len);
+
+unsigned long __must_check
+__memset_user(void __user *mem, int c, unsigned long len);
+
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 1bd837cdc4b1..84f8f8a20b30 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,6 +9,59 @@
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
 
+/*
+ * Memset Userspace
+ */
+
+unsigned long __memset_user(void __user *addr, int c, unsigned long size)
+{
+       long __d0;
+       unsigned long pattern = 0;
+       int i;
+
+       for (i = 0; i < 8; i++)
+               pattern = (pattern << 8) | (0xFF & c);
+       might_fault();
+       /* no memory constraint: gcc doesn't know about this memory */
+       stac();
+       asm volatile(
+               "       movq %[val], %%rdx\n"
+               "       testq  %[size8],%[size8]\n"
+               "       jz     4f\n"
+               "0:     mov %%rdx,(%[dst])\n"
+               "       addq   $8,%[dst]\n"
+               "       decl %%ecx ; jnz   0b\n"
+               "4:     movq  %[size1],%%rcx\n"
+               "       testl %%ecx,%%ecx\n"
+               "       jz     2f\n"
+               "1:     movb   %%dl,(%[dst])\n"
+               "       incq   %[dst]\n"
+               "       decl %%ecx ; jnz  1b\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
+               "       jmp 2b\n"
+               ".previous\n"
+               _ASM_EXTABLE_UA(0b, 3b)
+               _ASM_EXTABLE_UA(1b, 2b)
+               : [size8] "=&c"(size), [dst] "=&D" (__d0)
+               : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+                 [val] "ri"(pattern)
+               : "rdx");
+
+       clac();
+       return size;
+}
+EXPORT_SYMBOL(__memset_user);
+
+unsigned long memset_user(void __user *to, int c, unsigned long n)
+{
+       if (access_ok(VERIFY_WRITE, to, n))
+               return __memset_user(to, c, n);
+       return n;
+}
+EXPORT_SYMBOL(memset_user);
+
 /*
  * Zero Userspace
  */
-- 
2.19.1
