Re: [PATCH 2/2] riscv: Add KASAN support

2019-09-03 Thread Nick Hu
Hi Daniel,

On Wed, Sep 04, 2019 at 01:08:51AM +1000, Daniel Axtens wrote:
> Nick Hu  writes:
> 
> > Hi Christoph,
> >
> > Thanks for your reply. I'll answer your points one by one.
> >
> > Hi Alexander,
> >
> > Could you help me with the question about SOFTIRQENTRY_TEXT?
> >
> > On Mon, Aug 12, 2019 at 11:10:50PM +0800, Christoph Hellwig wrote:
> >> > 2. KASAN can't debug modules, since modules are allocated in the VMALLOC
> >> > area. We map the shadow memory corresponding to the VMALLOC area to
> >> > kasan_early_shadow_page, because we don't have enough physical space for
> >> > all the shadow memory that the VMALLOC area would need.
> >> 
> >> How do other architectures solve this problem?
> >> 
> > Other archs like arm64 and x86 allocate modules in their module region.
> 
> I've run into a similar difficulty in ppc64. My approach has been to
> add a generic feature to allow kasan to handle vmalloc areas:
> 
> https://lore.kernel.org/linux-mm/20190903145536.3390-1-...@axtens.net/
> 
> I link this with ppc64 in this series:
> 
> https://lore.kernel.org/linuxppc-dev/20190806233827.16454-1-...@axtens.net/
> 
> However, see Christophe Leroy's comments: he thinks I should take a
> different approach in a number of places, including just adding a
> separate module area. I haven't had time to think through all of his
> proposals yet; in particular I'd want to think through what the
> implication of a separate module area is for KASLR.
> 
> Regards,
> Daniel
>
 
Thanks for the advice! I'll study it.

Regards,
Nick

> >
> >> > @@ -54,6 +54,8 @@ config RISCV
> >> >  select EDAC_SUPPORT
> >> >  select ARCH_HAS_GIGANTIC_PAGE
> >> >  select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
> >> > +select GENERIC_STRNCPY_FROM_USER if KASAN
> >> 
> >> Is there any reason why we can't always enable this?  Also just
> >> enabling the generic efficient strncpy_from_user should probably be
> >> a separate patch.
> >> 
> > You're right, always enabling it would be better.
> >
> >> > +select HAVE_ARCH_KASAN if MMU
> >> 
> >> Based on your cover letter this should be if MMU && 64BIT
> >> 
> >> >  #define __HAVE_ARCH_MEMCPY
> >> >  extern asmlinkage void *memcpy(void *, const void *, size_t);
> >> > +extern asmlinkage void *__memcpy(void *, const void *, size_t);
> >> >  
> >> >  #define __HAVE_ARCH_MEMMOVE
> >> >  extern asmlinkage void *memmove(void *, const void *, size_t);
> >> > +extern asmlinkage void *__memmove(void *, const void *, size_t);
> >> > +
> >> > +#define memcpy(dst, src, len) __memcpy(dst, src, len)
> >> > +#define memmove(dst, src, len) __memmove(dst, src, len)
> >> > +#define memset(s, c, n) __memset(s, c, n)
> >> 
> >> This looks weird and at least needs a very good comment.  Also
> >> with this we effectively don't need the non-prefixed prototypes
> >> anymore.  Also you probably want to split the renaming of the mem*
> >> routines into a separate patch with a proper changelog.
> >> 
> > I made some mistakes in this port; this would be better:
> >
> > #define __HAVE_ARCH_MEMSET
> > extern asmlinkage void *memset(void *, int, size_t);
> > extern asmlinkage void *__memset(void *, int, size_t);
> >
> > #define __HAVE_ARCH_MEMCPY
> > extern asmlinkage void *memcpy(void *, const void *, size_t);
> > extern asmlinkage void *__memcpy(void *, const void *, size_t);
> >
> > #define __HAVE_ARCH_MEMMOVE
> > extern asmlinkage void *memmove(void *, const void *, size_t);
> > extern asmlinkage void *__memmove(void *, const void *, size_t);
> >
> > #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
> >
> > #define memcpy(dst, src, len) __memcpy(dst, src, len)
> > #define memmove(dst, src, len) __memmove(dst, src, len)
> > #define memset(s, c, n) __memset(s, c, n)
> >
> > #endif
> >
> >> >  #include 
> >> >  #include 
> >> >  
> >> > +#ifdef CONFIG_KASAN
> >> > +#include <asm/kasan.h>
> >> > +#endif
> >> 
> >> Any good reason to not just always include the header?
> >>
> > Nope, I'll remove the '#ifdef CONFIG_KASAN' and do the logic in the header
> > instead.
> >
> >> > +
> >> >  #ifdef CONFIG_DUMMY_CONSOLE
> >> >  struct screen_info screen_info = {
> >> >  .orig_video_lines   = 30,
> >> > @@ -64,12 +68,17 @@ void __init setup_arch(char **cmdline_p)
> >> >  
> >> >  setup_bootmem();
> >> >  paging_init();
> >> > +
> >> >  unflatten_device_tree();
> >> 
> >> spurious whitespace change.
> >> 
> >> > diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
> >> > index 23cd1a9..9700980 100644
> >> > --- a/arch/riscv/kernel/vmlinux.lds.S
> >> > +++ b/arch/riscv/kernel/vmlinux.lds.S
> >> > @@ -46,6 +46,7 @@ SECTIONS
> >> >  KPROBES_TEXT
> >> >  ENTRY_TEXT
> >> >  IRQENTRY_TEXT
> >> > +SOFTIRQENTRY_TEXT
> >> 
> >> Hmm.  What is the relation to kasan here?  Maybe we should add this
> >> separately with a good changelog?
> >> 

Re: [PATCH 2/2] riscv: Add KASAN support

2019-09-03 Thread Daniel Axtens
Nick Hu  writes:

> Hi Christoph,
>
> Thanks for your reply. I'll answer your points one by one.
>
> Hi Alexander,
>
> Could you help me with the question about SOFTIRQENTRY_TEXT?
>
> On Mon, Aug 12, 2019 at 11:10:50PM +0800, Christoph Hellwig wrote:
>> > 2. KASAN can't debug modules, since modules are allocated in the VMALLOC
>> > area. We map the shadow memory corresponding to the VMALLOC area to
>> > kasan_early_shadow_page, because we don't have enough physical space for
>> > all the shadow memory that the VMALLOC area would need.
>> 
>> How do other architectures solve this problem?
>> 
> Other archs like arm64 and x86 allocate modules in their module region.

I've run into a similar difficulty in ppc64. My approach has been to
add a generic feature to allow kasan to handle vmalloc areas:

https://lore.kernel.org/linux-mm/20190903145536.3390-1-...@axtens.net/

I link this with ppc64 in this series:

https://lore.kernel.org/linuxppc-dev/20190806233827.16454-1-...@axtens.net/

However, see Christophe Leroy's comments: he thinks I should take a
different approach in a number of places, including just adding a
separate module area. I haven't had time to think through all of his
proposals yet; in particular I'd want to think through what the
implication of a separate module area is for KASLR.

Regards,
Daniel

>
>> > @@ -54,6 +54,8 @@ config RISCV
>> >select EDAC_SUPPORT
>> >select ARCH_HAS_GIGANTIC_PAGE
>> >select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
>> > +  select GENERIC_STRNCPY_FROM_USER if KASAN
>> 
>> Is there any reason why we can't always enable this?  Also just
>> enabling the generic efficient strncpy_from_user should probably be
>> a separate patch.
>> 
> You're right, always enabling it would be better.
>
>> > +  select HAVE_ARCH_KASAN if MMU
>> 
>> Based on your cover letter this should be if MMU && 64BIT
>> 
>> >  #define __HAVE_ARCH_MEMCPY
>> >  extern asmlinkage void *memcpy(void *, const void *, size_t);
>> > +extern asmlinkage void *__memcpy(void *, const void *, size_t);
>> >  
>> >  #define __HAVE_ARCH_MEMMOVE
>> >  extern asmlinkage void *memmove(void *, const void *, size_t);
>> > +extern asmlinkage void *__memmove(void *, const void *, size_t);
>> > +
>> > +#define memcpy(dst, src, len) __memcpy(dst, src, len)
>> > +#define memmove(dst, src, len) __memmove(dst, src, len)
>> > +#define memset(s, c, n) __memset(s, c, n)
>> 
>> This looks weird and at least needs a very good comment.  Also
>> with this we effectively don't need the non-prefixed prototypes
>> anymore.  Also you probably want to split the renaming of the mem*
>> routines into a separate patch with a proper changelog.
>> 
> I made some mistakes in this port; this would be better:
>
> #define __HAVE_ARCH_MEMSET
> extern asmlinkage void *memset(void *, int, size_t);
> extern asmlinkage void *__memset(void *, int, size_t);
>
> #define __HAVE_ARCH_MEMCPY
> extern asmlinkage void *memcpy(void *, const void *, size_t);
> extern asmlinkage void *__memcpy(void *, const void *, size_t);
>
> #define __HAVE_ARCH_MEMMOVE
> extern asmlinkage void *memmove(void *, const void *, size_t);
> extern asmlinkage void *__memmove(void *, const void *, size_t);
>
> #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
>
> #define memcpy(dst, src, len) __memcpy(dst, src, len)
> #define memmove(dst, src, len) __memmove(dst, src, len)
> #define memset(s, c, n) __memset(s, c, n)
>
> #endif
>
>> >  #include 
>> >  #include 
>> >  
>> > +#ifdef CONFIG_KASAN
>> > +#include <asm/kasan.h>
>> > +#endif
>> 
>> Any good reason to not just always include the header?
>>
> Nope, I'll remove the '#ifdef CONFIG_KASAN' and do the logic in the header
> instead.
>
>> > +
>> >  #ifdef CONFIG_DUMMY_CONSOLE
>> >  struct screen_info screen_info = {
>> >.orig_video_lines   = 30,
>> > @@ -64,12 +68,17 @@ void __init setup_arch(char **cmdline_p)
>> >  
>> >setup_bootmem();
>> >paging_init();
>> > +
>> >unflatten_device_tree();
>> 
>> spurious whitespace change.
>> 
>> > diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
>> > index 23cd1a9..9700980 100644
>> > --- a/arch/riscv/kernel/vmlinux.lds.S
>> > +++ b/arch/riscv/kernel/vmlinux.lds.S
>> > @@ -46,6 +46,7 @@ SECTIONS
>> >KPROBES_TEXT
>> >ENTRY_TEXT
>> >IRQENTRY_TEXT
>> > +  SOFTIRQENTRY_TEXT
>> 
>> Hmm.  What is the relation to kasan here?  Maybe we should add this
>> separately with a good changelog?
>> 
> There is a commit for it:
>
> Author: Alexander Potapenko 
> Date:   Fri Mar 25 14:22:05 2016 -0700
>
> arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
>
> KASAN needs to know whether the allocation happens in an IRQ handler.
> This lets us strip everything below the IRQ entry point to reduce the
> number of unique stack traces needed to be stored.
>
> Move the definition of __irq_entry to <linux/interrupt.h> so that the
> users don't need to pull in <linux/ftrace.h>.  Also introduce the
> __softirq_entry macro which is similar to __irq_entry, but puts the
> corresponding functions to the .softirqentry.text section.

Re: [PATCH 2/2] riscv: Add KASAN support

2019-08-22 Thread Andrey Ryabinin



On 8/14/19 10:44 AM, Nick Hu wrote:

>>
>>> diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
>>> index 23cd1a9..9700980 100644
>>> --- a/arch/riscv/kernel/vmlinux.lds.S
>>> +++ b/arch/riscv/kernel/vmlinux.lds.S
>>> @@ -46,6 +46,7 @@ SECTIONS
>>> KPROBES_TEXT
>>> ENTRY_TEXT
>>> IRQENTRY_TEXT
>>> +   SOFTIRQENTRY_TEXT
>>
>> Hmm.  What is the relation to kasan here?  Maybe we should add this
>> separately with a good changelog?
>>
> There is a commit for it:
> 
> Author: Alexander Potapenko 
> Date:   Fri Mar 25 14:22:05 2016 -0700
> 
> arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
> 
> KASAN needs to know whether the allocation happens in an IRQ handler.
> This lets us strip everything below the IRQ entry point to reduce the
> number of unique stack traces needed to be stored.
> 
> Move the definition of __irq_entry to  so that the
> users don't need to pull in .  Also introduce the
> __softirq_entry macro which is similar to __irq_entry, but puts the
> corresponding functions to the .softirqentry.text section.
> 
> After reading the patch I understand that soft/hard IRQ entries should be
> separated for KASAN to work, but why?
> 

KASAN doesn't need soft/hard IRQ entries separated. KASAN wants to know the
entry point of the IRQ (hard or soft) so it can filter out the random non-IRQ
part of the stacktrace before feeding it to stack_depot_save(). See
filter_irq_stacks().
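
For reference, a condensed sketch of that filtering, roughly as it appears in
mm/kasan/common.c around the time of this thread (originally added by the 2016
commit quoted above):

static bool in_irqentry_text(unsigned long ptr)
{
	/*
	 * Section bounds emitted by IRQENTRY_TEXT/SOFTIRQENTRY_TEXT in the
	 * linker script -- which is why the riscv linker script needs the
	 * SOFTIRQENTRY_TEXT entry.
	 */
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

static unsigned int filter_irq_stacks(unsigned long *entries,
				      unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function itself in the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}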




Re: [PATCH 2/2] riscv: Add KASAN support

2019-08-14 Thread Nick Hu
Hi Christoph,

Thanks for your reply. I'll answer your points one by one.

Hi Alexander,

Could you help me with the question about SOFTIRQENTRY_TEXT?

On Mon, Aug 12, 2019 at 11:10:50PM +0800, Christoph Hellwig wrote:
> > 2. KASAN can't debug modules, since modules are allocated in the VMALLOC
> > area. We map the shadow memory corresponding to the VMALLOC area to
> > kasan_early_shadow_page, because we don't have enough physical space for
> > all the shadow memory that the VMALLOC area would need.
> 
> How do other architectures solve this problem?
> 
Other archs like arm64 and x86 allocate modules in their module region.

> > @@ -54,6 +54,8 @@ config RISCV
> > select EDAC_SUPPORT
> > select ARCH_HAS_GIGANTIC_PAGE
> > select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
> > +   select GENERIC_STRNCPY_FROM_USER if KASAN
> 
> Is there any reason why we can't always enable this?  Also just
> enabling the generic efficient strncpy_from_user should probably be
> a separate patch.
> 
You're right, always enabling it would be better.
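
A follow-up could then simply drop the condition; a sketch of that separate
patch (one line in arch/riscv/Kconfig):

-	select GENERIC_STRNCPY_FROM_USER if KASAN
+	select GENERIC_STRNCPY_FROM_USER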

> > +   select HAVE_ARCH_KASAN if MMU
> 
> Based on your cover letter this should be if MMU && 64BIT
> 
> >  #define __HAVE_ARCH_MEMCPY
> >  extern asmlinkage void *memcpy(void *, const void *, size_t);
> > +extern asmlinkage void *__memcpy(void *, const void *, size_t);
> >  
> >  #define __HAVE_ARCH_MEMMOVE
> >  extern asmlinkage void *memmove(void *, const void *, size_t);
> > +extern asmlinkage void *__memmove(void *, const void *, size_t);
> > +
> > +#define memcpy(dst, src, len) __memcpy(dst, src, len)
> > +#define memmove(dst, src, len) __memmove(dst, src, len)
> > +#define memset(s, c, n) __memset(s, c, n)
> 
> This looks weird and at least needs a very good comment.  Also
> with this we effectively don't need the non-prefixed prototypes
> anymore.  Also you probably want to split the renaming of the mem*
> routines into a separate patch with a proper changelog.
> 
I made some mistakes in this port; this would be better:

#define __HAVE_ARCH_MEMSET
extern asmlinkage void *memset(void *, int, size_t);
extern asmlinkage void *__memset(void *, int, size_t);

#define __HAVE_ARCH_MEMCPY
extern asmlinkage void *memcpy(void *, const void *, size_t);
extern asmlinkage void *__memcpy(void *, const void *, size_t);

#define __HAVE_ARCH_MEMMOVE
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#endif
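
For context: files built with KASAN instrumentation define
__SANITIZE_ADDRESS__, so the guard above redirects mem* to the uninstrumented
__mem* only in non-instrumented files. In instrumented code, generic KASAN
interposes on the plain names itself; a sketch of that pattern, roughly as it
appears in mm/kasan/common.c at the time:

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	/*
	 * Check both buffers against the shadow memory, then defer to the
	 * uninstrumented assembly implementation.
	 */
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}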

> >  #include 
> >  #include 
> >  
> > +#ifdef CONFIG_KASAN
> > +#include <asm/kasan.h>
> > +#endif
> 
> Any good reason to not just always include the header?
>
Nope, I'll remove the '#ifdef CONFIG_KASAN' and do the logic in the header
instead.
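
Since asm/kasan.h in this patch already guards its contents with #ifdef
CONFIG_KASAN, the setup.c side of that cleanup would amount to (sketch):

-#ifdef CONFIG_KASAN
 #include <asm/kasan.h>
-#endif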

> > +
> >  #ifdef CONFIG_DUMMY_CONSOLE
> >  struct screen_info screen_info = {
> > .orig_video_lines   = 30,
> > @@ -64,12 +68,17 @@ void __init setup_arch(char **cmdline_p)
> >  
> > setup_bootmem();
> > paging_init();
> > +
> > unflatten_device_tree();
> 
> spurious whitespace change.
> 
> > diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
> > index 23cd1a9..9700980 100644
> > --- a/arch/riscv/kernel/vmlinux.lds.S
> > +++ b/arch/riscv/kernel/vmlinux.lds.S
> > @@ -46,6 +46,7 @@ SECTIONS
> > KPROBES_TEXT
> > ENTRY_TEXT
> > IRQENTRY_TEXT
> > +   SOFTIRQENTRY_TEXT
> 
> Hmm.  What is the relation to kasan here?  Maybe we should add this
> separately with a good changelog?
> 
There is a commit for it:

Author: Alexander Potapenko 
Date:   Fri Mar 25 14:22:05 2016 -0700

arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections

KASAN needs to know whether the allocation happens in an IRQ handler.
This lets us strip everything below the IRQ entry point to reduce the
number of unique stack traces needed to be stored.

Move the definition of __irq_entry to <linux/interrupt.h> so that the
users don't need to pull in <linux/ftrace.h>.  Also introduce the
__softirq_entry macro which is similar to __irq_entry, but puts the
corresponding functions to the .softirqentry.text section.

After reading the patch I understand that soft/hard IRQ entries should be
separated for KASAN to work, but why?

Alexander, do you have any comments on this?

> > +++ b/arch/riscv/mm/kasan_init.c
> > @@ -0,0 +1,102 @@
> > +// SPDX-License-Identifier: GPL-2.0
> 
> This probably also wants a copyright statement.
> 
> > +   // init for swapper_pg_dir
> 
> Please use /* */ style comments.


Re: [PATCH 2/2] riscv: Add KASAN support

2019-08-12 Thread Christoph Hellwig
> 2. KASAN can't debug modules, since modules are allocated in the VMALLOC
> area. We map the shadow memory corresponding to the VMALLOC area to
> kasan_early_shadow_page, because we don't have enough physical space for
> all the shadow memory that the VMALLOC area would need.

How do other architectures solve this problem?

> @@ -54,6 +54,8 @@ config RISCV
>   select EDAC_SUPPORT
>   select ARCH_HAS_GIGANTIC_PAGE
>   select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
> + select GENERIC_STRNCPY_FROM_USER if KASAN

Is there any reason why we can't always enable this?  Also just
enabling the generic efficient strncpy_from_user should probably be
a separate patch.

> + select HAVE_ARCH_KASAN if MMU

Based on your cover letter this should be if MMU && 64BIT

>  #define __HAVE_ARCH_MEMCPY
>  extern asmlinkage void *memcpy(void *, const void *, size_t);
> +extern asmlinkage void *__memcpy(void *, const void *, size_t);
>  
>  #define __HAVE_ARCH_MEMMOVE
>  extern asmlinkage void *memmove(void *, const void *, size_t);
> +extern asmlinkage void *__memmove(void *, const void *, size_t);
> +
> +#define memcpy(dst, src, len) __memcpy(dst, src, len)
> +#define memmove(dst, src, len) __memmove(dst, src, len)
> +#define memset(s, c, n) __memset(s, c, n)

This looks weird and at least needs a very good comment.  Also
with this we effectively don't need the non-prefixed prototypes
anymore.  Also you probably want to split the renaming of the mem*
routines into a separate patch with a proper changelog.

>  #include 
>  #include 
>  
> +#ifdef CONFIG_KASAN
> +#include <asm/kasan.h>
> +#endif

Any good reason to not just always include the header?

> +
>  #ifdef CONFIG_DUMMY_CONSOLE
>  struct screen_info screen_info = {
>   .orig_video_lines   = 30,
> @@ -64,12 +68,17 @@ void __init setup_arch(char **cmdline_p)
>  
>   setup_bootmem();
>   paging_init();
> +
>   unflatten_device_tree();

spurious whitespace change.

> diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
> index 23cd1a9..9700980 100644
> --- a/arch/riscv/kernel/vmlinux.lds.S
> +++ b/arch/riscv/kernel/vmlinux.lds.S
> @@ -46,6 +46,7 @@ SECTIONS
>   KPROBES_TEXT
>   ENTRY_TEXT
>   IRQENTRY_TEXT
> + SOFTIRQENTRY_TEXT

Hmm.  What is the relation to kasan here?  Maybe we should add this
separately with a good changelog?

> +++ b/arch/riscv/mm/kasan_init.c
> @@ -0,0 +1,102 @@
> +// SPDX-License-Identifier: GPL-2.0

This probably also wants a copyright statement.

> + // init for swapper_pg_dir

Please use /* */ style comments.


[PATCH 2/2] riscv: Add KASAN support

2019-08-07 Thread Nick Hu
This patch ports the Kernel Address SANitizer (KASAN) feature to riscv.

Note: The start address of the shadow memory is at the beginning of the
kernel address space, which is 2^64 - (2^39 / 2) in SV39. The size of the
kernel address space is 2^38 bytes, so the size of the shadow memory is
2^38 / 8 bytes. Thus, the shadow memory does not overlap the fixmap area.
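
For concreteness, the constants implied by that layout (a quick sanity check):

  2^64 - 2^39/2 = 2^64 - 2^38 = 0xffffffc000000000   (KASAN_SHADOW_START)
  2^38 / 8      = 2^35 bytes  = 32 GiB               (KASAN_SHADOW_SIZE)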

There are currently two limitations in this port:

1. RV64 only: KASAN needs a large address space for the extra shadow memory
region.

2. KASAN can't debug modules, since modules are allocated in the VMALLOC
area. We map the shadow memory corresponding to the VMALLOC area to
kasan_early_shadow_page, because we don't have enough physical space for
all the shadow memory that the VMALLOC area would need.
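
The mapping behind limitation 2 is the generic one from include/linux/kasan.h:
each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes of address space,
so a large region can be "covered" read-only by aliasing all of its shadow
pages to the single kasan_early_shadow_page:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* One shadow byte per 8 bytes of address space. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}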

Signed-off-by: Nick Hu 
---
 arch/riscv/Kconfig                  |   2 +
 arch/riscv/include/asm/kasan.h      |  26 +
 arch/riscv/include/asm/pgtable-64.h |   5 ++
 arch/riscv/include/asm/string.h     |   7 +++
 arch/riscv/kernel/head.S            |   3 +
 arch/riscv/kernel/riscv_ksyms.c     |   3 +
 arch/riscv/kernel/setup.c           |   9 +++
 arch/riscv/kernel/vmlinux.lds.S     |   1 +
 arch/riscv/lib/memcpy.S             |   5 +-
 arch/riscv/lib/memmove.S            |   5 +-
 arch/riscv/lib/memset.S             |   5 +-
 arch/riscv/mm/Makefile              |   6 ++
 arch/riscv/mm/kasan_init.c          | 102 +++
 13 files changed, 173 insertions(+), 6 deletions(-)
 create mode 100644 arch/riscv/include/asm/kasan.h
 create mode 100644 arch/riscv/mm/kasan_init.c

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 59a4727..4878b7a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -54,6 +54,8 @@ config RISCV
select EDAC_SUPPORT
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+   select GENERIC_STRNCPY_FROM_USER if KASAN
+   select HAVE_ARCH_KASAN if MMU
 
 config MMU
def_bool y
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
new file mode 100644
index 0000000..e0c1f27
--- /dev/null
+++ b/arch/riscv/include/asm/kasan.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <asm/pgtable.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT   3
+
+#define KASAN_SHADOW_SIZE  (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START 0xffffffc000000000 // 2^64 - 2^38
+#define KASAN_SHADOW_END   (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define KASAN_SHADOW_OFFSET(KASAN_SHADOW_END - (1ULL << \
+   (64 - KASAN_SHADOW_SCALE_SHIFT)))
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
+#endif
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7df8daa..777a1dd 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -59,6 +59,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+static inline struct page *pud_page(pud_t pud)
+{
+   return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 11210f1..ab90f44 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -11,11 +11,18 @@
 
 #define __HAVE_ARCH_MEMSET
 extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
 
 #define __HAVE_ARCH_MEMCPY
 extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
 
 #define __HAVE_ARCH_MEMMOVE
 extern asmlinkage void *memmove(void *, const void *, size_t);
+extern asmlinkage void *__memmove(void *, const void *, size_t);
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
 
 #endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 0f1ba17..2f7bc8b 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -97,6 +97,9 @@ clear_bss_done:
sw zero, TASK_TI_CPU(tp)
la sp, init_thread_union + THREAD_SIZE
 
+#ifdef CONFIG_KASAN
+   call kasan_early_init
+#endif
/* Start the kernel */
call parse_dtb
tail start_kernel
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index ffabaf1..ad9f007 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -15,3 +15,6 @@
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);