Re: [v2 PATCH] arm64: kasan: instrument user memory access API

2016-06-06 Thread Mark Rutland
On Fri, May 27, 2016 at 02:01:03PM -0700, Yang Shi wrote:
> The upstream commit 1771c6e1a567ea0ba20a4ffe68a1419fd8ef
> ("x86/kasan: instrument user memory access API") added KASAN instrumentation
> to the x86 user memory access API, so add such instrumentation to arm64 too.
> 
> Define __copy_to/from_user in C in order to add kasan_check_read/write calls,
> and rename the assembly implementations to __arch_copy_to/from_user.
> 
> Tested with the test_kasan module.
> 
> Signed-off-by: Yang Shi 
> ---
> v2:
>  Addressed the comments from Andrey and Mark by adding kasan_check_read/write
>  into __copy_to/from_user.
> 
>  arch/arm64/include/asm/uaccess.h | 25 +++++++++++++++++++++----
>  arch/arm64/kernel/arm64ksyms.c   |  4 ++--
>  arch/arm64/lib/copy_from_user.S  |  4 ++--
>  arch/arm64/lib/copy_to_user.S    |  4 ++--
>  4 files changed, 27 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
> index 0685d74..4dc9a8f 100644
> --- a/arch/arm64/include/asm/uaccess.h
> +++ b/arch/arm64/include/asm/uaccess.h
> @@ -23,6 +23,7 @@
>   */
>  #include <linux/string.h>
>  #include <linux/thread_info.h>
> +#include <linux/kasan.h>

Nit: please move this before the other includes, to keep these ordered
alphabetically.
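That is, something like the following (the neighbouring <linux/string.h> and
<linux/thread_info.h> lines are assumed from the current uaccess.h; the point
is just that kasan.h sorts first):

	#include <linux/kasan.h>
	#include <linux/string.h>
	#include <linux/thread_info.h>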

Other than that, this looks correct to me, and seems to have addressed
the issue from v1. I've given this a spin on v4.7-rc2, with and without
CONFIG_UBSAN enabled. So FWIW, with the minor fix above:

Reviewed-by: Mark Rutland 
Tested-by: Mark Rutland 

As this isn't a fix, I assume that this is for Catalin to pick for v4.8.

Thanks,
Mark.


Re: [v2 PATCH] arm64: kasan: instrument user memory access API

2016-06-06 Thread Shi, Yang

Hi Will & Catalin,

Any comments on this patch?

Thanks,
Yang


On 5/27/2016 2:01 PM, Yang Shi wrote:

The upstream commit 1771c6e1a567ea0ba20a4ffe68a1419fd8ef
("x86/kasan: instrument user memory access API") added KASAN instrumentation
to the x86 user memory access API, so add such instrumentation to arm64 too.

Define __copy_to/from_user in C in order to add kasan_check_read/write calls,
and rename the assembly implementations to __arch_copy_to/from_user.

Tested with the test_kasan module.

Signed-off-by: Yang Shi 
---
v2:
 Addressed the comments from Andrey and Mark by adding kasan_check_read/write
 into __copy_to/from_user.

 arch/arm64/include/asm/uaccess.h | 25 +++++++++++++++++++++----
 arch/arm64/kernel/arm64ksyms.c   |  4 ++--
 arch/arm64/lib/copy_from_user.S  |  4 ++--
 arch/arm64/lib/copy_to_user.S    |  4 ++--
 4 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74..4dc9a8f 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -23,6 +23,7 @@
  */
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kasan.h>
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
@@ -269,15 +270,29 @@ do {								\
 		-EFAULT;						\
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	return  __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	return  __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	kasan_check_write(to, n);
+
 	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
+		n = __arch_copy_from_user(to, from, n);
 	else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
@@ -285,8 +300,10 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	kasan_check_read(from, n);
+
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
 	return n;
 }
 
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 678f30b0..2dc4440 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 /* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306..0b90497 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -66,7 +66,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0				// Nothing to copy
 	ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae6..7a7efe2 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -65,7 +65,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret

Re: [v2 PATCH] arm64: kasan: instrument user memory access API

2016-05-30 Thread Andrey Ryabinin

On 05/28/2016 12:01 AM, Yang Shi wrote:
> The upstream commit 1771c6e1a567ea0ba20a4ffe68a1419fd8ef
> ("x86/kasan: instrument user memory access API") added KASAN instrumentation
> to the x86 user memory access API, so add such instrumentation to arm64 too.
> 
> Define __copy_to/from_user in C in order to add kasan_check_read/write calls,
> and rename the assembly implementations to __arch_copy_to/from_user.
> 
> Tested with the test_kasan module.
> 
> Signed-off-by: Yang Shi 
> 

Acked-by: Andrey Ryabinin 


[v2 PATCH] arm64: kasan: instrument user memory access API

2016-05-27 Thread Yang Shi
The upstream commit 1771c6e1a567ea0ba20a4ffe68a1419fd8ef
("x86/kasan: instrument user memory access API") added KASAN instrumentation
to the x86 user memory access API, so add such instrumentation to arm64 too.

Define __copy_to/from_user in C in order to add kasan_check_read/write calls,
and rename the assembly implementations to __arch_copy_to/from_user.

Tested with the test_kasan module.
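
As an illustration of the kind of bug this now catches (the function name,
buffer size and includes below are made up for the example and are not part
of this patch; the sketch loosely mirrors test_kasan's copy_user_test):

#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/printk.h>

/* Illustrative sketch only. */
static noinline void copy_user_oob_demo(const void __user *ubuf)
{
	char *kbuf = kmalloc(16, GFP_KERNEL);	/* 16-byte slab object */

	if (!kbuf)
		return;
	/* n = 20 > 16: kasan_check_write(to, n) reports a slab-out-of-bounds */
	if (copy_from_user(kbuf, ubuf, 20))
		pr_info("short copy from userspace\n");
	kfree(kbuf);
}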

Signed-off-by: Yang Shi 
---
v2:
 Addressed the comments from Andrey and Mark by adding kasan_check_read/write
 into __copy_to/from_user.

 arch/arm64/include/asm/uaccess.h | 25 +++++++++++++++++++++----
 arch/arm64/kernel/arm64ksyms.c   |  4 ++--
 arch/arm64/lib/copy_from_user.S  |  4 ++--
 arch/arm64/lib/copy_to_user.S    |  4 ++--
 4 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74..4dc9a8f 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -23,6 +23,7 @@
  */
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kasan.h>
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
@@ -269,15 +270,29 @@ do {								\
 		-EFAULT;						\
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	return  __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	return  __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	kasan_check_write(to, n);
+
 	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
+		n = __arch_copy_from_user(to, from, n);
 	else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
@@ -285,8 +300,10 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	kasan_check_read(from, n);
+
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
 	return n;
 }
 
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 678f30b0..2dc4440 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 /* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306..0b90497 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -66,7 +66,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0				// Nothing to copy
 	ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae6..7a7efe2 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -65,7 +65,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
-- 
2.0.2