Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
Le 18/02/2019 à 10:27, Michael Ellerman a écrit : Christophe Leroy writes: diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index e0637730a8e7..dba2c1038363 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -251,6 +251,10 @@ GLUE(.,name): #define _GLOBAL_TOC(name) _GLOBAL(name) +#define KASAN_OVERRIDE(x, y) \ + .weak x; \ + .set x, y + Can you add a comment describing what that does and why? It's gone. Hope the new approach is more clear. It's now in a dedicated patch. diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 879b36602748..fc4c42262694 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -16,8 +16,9 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o+= -fPIC endif -CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING +CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -DDISABLE_BRANCH_PROFILING +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -DDISABLE_BRANCH_PROFILING Why do we need to disable branch profiling now? Recommended by Andrey, see https://patchwork.ozlabs.org/patch/1023887/ Maybe it should be only when KASAN is active ? For prom_init it should probably be all the time, for the others I don't know. Can't remember why I did it that way. I'd probably be happier if all the CFLAGS changes were done in a leadup patch to make them more obvious. Oops, I forgot to read your mail entirely before sending out v6. Indeed I only read first part. Anyway, that's probably not the last run. 
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 667df97d2595..da6bb16e0876 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -16,8 +16,16 @@ # If you really need to reference something from prom_init.o add # it to the list below: +grep CONFIG_KASAN=y .config >/dev/null Just to be safe "^CONFIG_KASAN=y$" ? ok +if [ $? -eq 0 ] +then + MEMFCT="__memcpy __memset" +else + MEMFCT="memcpy memset" +fi MEM_FUNCS ? Yes, I change it now before I forget. diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 3bf9fc6fd36c..ce8d4a9f810a 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -8,6 +8,14 @@ ccflags-$(CONFIG_PPC64):= $(NO_MINIMAL_TOC) CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) +KASAN_SANITIZE_code-patching.o := n +KASAN_SANITIZE_feature-fixups.o := n + +ifdef CONFIG_KASAN +CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING +CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING +endif There's that branch profiling again, though here it's only if KASAN is enabled. diff --git a/arch/powerpc/mm/kasan_init.c b/arch/powerpc/mm/kasan_init.c new file mode 100644 index ..bd8e0a263e12 --- /dev/null +++ b/arch/powerpc/mm/kasan_init.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include +#include +#include + +void __init kasan_early_init(void) +{ + unsigned long addr = KASAN_SHADOW_START; + unsigned long end = KASAN_SHADOW_END; + unsigned long next; + pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr); Can none of those fail? map_kernel_page() in pgtable_32.c does exactly the same. pud_offset() and pmd_offset() are no-ops and only serve as type modifiers, so pmd will get the value returned by pgd_offset_k() which should always be valid unless init_mm->pgd is bad. Christophe cheers
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
Christophe Leroy writes: > diff --git a/arch/powerpc/include/asm/ppc_asm.h > b/arch/powerpc/include/asm/ppc_asm.h > index e0637730a8e7..dba2c1038363 100644 > --- a/arch/powerpc/include/asm/ppc_asm.h > +++ b/arch/powerpc/include/asm/ppc_asm.h > @@ -251,6 +251,10 @@ GLUE(.,name): > > #define _GLOBAL_TOC(name) _GLOBAL(name) > > +#define KASAN_OVERRIDE(x, y) \ > + .weak x; \ > + .set x, y > + Can you add a comment describing what that does and why? > diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile > index 879b36602748..fc4c42262694 100644 > --- a/arch/powerpc/kernel/Makefile > +++ b/arch/powerpc/kernel/Makefile > @@ -16,8 +16,9 @@ CFLAGS_prom_init.o += -fPIC > CFLAGS_btext.o += -fPIC > endif > > -CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > +CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING > +CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -DDISABLE_BRANCH_PROFILING > +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -DDISABLE_BRANCH_PROFILING Why do we need to disable branch profiling now? I'd probably be happier if all the CFLAGS changes were done in a leadup patch to make them more obvious. > diff --git a/arch/powerpc/kernel/prom_init_check.sh > b/arch/powerpc/kernel/prom_init_check.sh > index 667df97d2595..da6bb16e0876 100644 > --- a/arch/powerpc/kernel/prom_init_check.sh > +++ b/arch/powerpc/kernel/prom_init_check.sh > @@ -16,8 +16,16 @@ > # If you really need to reference something from prom_init.o add > # it to the list below: > > +grep CONFIG_KASAN=y .config >/dev/null Just to be safe "^CONFIG_KASAN=y$" ? > +if [ $? -eq 0 ] > +then > + MEMFCT="__memcpy __memset" > +else > + MEMFCT="memcpy memset" > +fi MEM_FUNCS ? 
> diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile > index 3bf9fc6fd36c..ce8d4a9f810a 100644 > --- a/arch/powerpc/lib/Makefile > +++ b/arch/powerpc/lib/Makefile > @@ -8,6 +8,14 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) > CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) > CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) > > +KASAN_SANITIZE_code-patching.o := n > +KASAN_SANITIZE_feature-fixups.o := n > + > +ifdef CONFIG_KASAN > +CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING > +CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING > +endif There's that branch profiling again, though here it's only if KASAN is enabled. > diff --git a/arch/powerpc/mm/kasan_init.c b/arch/powerpc/mm/kasan_init.c > new file mode 100644 > index ..bd8e0a263e12 > --- /dev/null > +++ b/arch/powerpc/mm/kasan_init.c > @@ -0,0 +1,114 @@ > +// SPDX-License-Identifier: GPL-2.0 > + > +#define DISABLE_BRANCH_PROFILING > + > +#include > +#include > +#include > +#include > +#include > + > +void __init kasan_early_init(void) > +{ > + unsigned long addr = KASAN_SHADOW_START; > + unsigned long end = KASAN_SHADOW_END; > + unsigned long next; > + pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr); Can none of those fail? cheers
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
On 2/15/19 1:10 PM, Christophe Leroy wrote: > > > Le 15/02/2019 à 11:01, Andrey Ryabinin a écrit : >> >> >> On 2/15/19 11:41 AM, Christophe Leroy wrote: >>> >>> >>> Le 14/02/2019 à 23:04, Daniel Axtens a écrit : Hi Christophe, > --- a/arch/powerpc/include/asm/string.h > +++ b/arch/powerpc/include/asm/string.h > @@ -27,6 +27,20 @@ extern int memcmp(const void *,const void > *,__kernel_size_t); > extern void * memchr(const void *,int,__kernel_size_t); > extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); > +void *__memset(void *s, int c, __kernel_size_t count); > +void *__memcpy(void *to, const void *from, __kernel_size_t n); > +void *__memmove(void *to, const void *from, __kernel_size_t n); > + > +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) > +/* > + * For files that are not instrumented (e.g. mm/slub.c) we > + * should use not instrumented version of mem* functions. > + */ > +#define memcpy(dst, src, len) __memcpy(dst, src, len) > +#define memmove(dst, src, len) __memmove(dst, src, len) > +#define memset(s, c, n) __memset(s, c, n) > +#endif > + I'm finding that I miss tests like 'kasan test: kasan_memcmp out-of-bounds in memcmp' because the uninstrumented asm version is used instead of an instrumented C version. I ended up guarding the relevant __HAVE_ARCH_x symbols behind a #ifndef CONFIG_KASAN and only exporting the arch versions if we're not compiled with KASAN. I find I need to guard and unexport strncpy, strncmp, memchr and memcmp. Do you need to do this on 32bit as well, or are those tests passing anyway for some reason? >>> >>> Indeed, I didn't try the KASAN test module recently, because my configs >>> don't have CONFIG_MODULE by default. >>> >>> Trying to test it now, I am discovering that module loading oopses with >>> latest version of my series, I need to figure out exactly why. Here below >>> the oops by modprobing test_module (the one supposed to just say hello to >>> the world). 
>>> >>> What we see is an access to the RO kasan zero area. >>> >>> The shadow mem is 0xf7c0..0xffc0 >>> Linear kernel memory is shadowed by 0xf7c0-0xf8bf >>> 0xf8c0-0xffc0 is shadowed read only by the kasan zero page. >>> >>> Why is kasan trying to access that ? Isn't kasan supposed to not check >>> stuff in vmalloc area ? >> >> It tries to poison global variables in modules. If module is in vmalloc, >> then it will try to poison vmalloc. >> Given that the vmalloc area is not so big on 32bits, the easiest solution is >> to cover all vmalloc with RW shadow. >> > > Euh ... Not so big ? > > Memory: 96448K/131072K available (8016K kernel code, 1680K rwdata > , 2720K rodata, 624K init, 4678K bss, 34624K reserved, 0K cma-reserved) > Kernel virtual memory layout: > * 0xffefc000..0xc000 : fixmap > * 0xf7c0..0xffc0 : kasan shadow mem > * 0xf7a0..0xf7c0 : consistent mem > * 0xf7a0..0xf7a0 : early ioremap > * 0xc900..0xf7a0 : vmalloc & ioremap > > Here, vmalloc area size 0x2ea0, that is 746Mbytes. Shadow for this would > be 93Mbytes and we are already using 16Mbytes to shadow the linear memory > area. This poor board has 128Mbytes RAM in total. > > So another solution is needed. > Ok. As a temporary workaround you can make __asan_register_globals() to skip globals in vmalloc(). Obviously it means that out-of-bounds accesses to globals in modules will be missed. Non temporary solution would be making kasan to fully support vmalloc, i.e. remove RO shadow and allocate/free shadow on vmalloc()/vfree(). But this feels like separate task, out of scope of this patch set. It is also possible to follow some other arches - dedicate separate address range for modules, allocate/free shadow in module_alloc/free. But it doesn't seem worthy to implement this only for the sake of kasan, since vmalloc support needs to be done anyway.
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
Le 15/02/2019 à 11:01, Andrey Ryabinin a écrit : On 2/15/19 11:41 AM, Christophe Leroy wrote: Le 14/02/2019 à 23:04, Daniel Axtens a écrit : Hi Christophe, --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -27,6 +27,20 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); +void *__memset(void *s, int c, __kernel_size_t count); +void *__memcpy(void *to, const void *from, __kernel_size_t n); +void *__memmove(void *to, const void *from, __kernel_size_t n); + +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) +#endif + I'm finding that I miss tests like 'kasan test: kasan_memcmp out-of-bounds in memcmp' because the uninstrumented asm version is used instead of an instrumented C version. I ended up guarding the relevant __HAVE_ARCH_x symbols behind a #ifndef CONFIG_KASAN and only exporting the arch versions if we're not compiled with KASAN. I find I need to guard and unexport strncpy, strncmp, memchr and memcmp. Do you need to do this on 32bit as well, or are those tests passing anyway for some reason? Indeed, I didn't try the KASAN test module recently, because my configs don't have CONFIG_MODULE by default. Trying to test it now, I am discovering that module loading oopses with latest version of my series, I need to figure out exactly why. Here below the oops by modprobing test_module (the one supposed to just say hello to the world). What we see is an access to the RO kasan zero area. 
The shadow mem is 0xf7c0..0xffc0 Linear kernel memory is shadowed by 0xf7c0-0xf8bf 0xf8c0-0xffc0 is shadowed read only by the kasan zero page. Why is kasan trying to access that ? Isn't kasan supposed to not check stuff in vmalloc area ? It tries to poison global variables in modules. If module is in vmalloc, then it will try to poison vmalloc. Given that the vmalloc area is not so big on 32bits, the easiest solution is to cover all vmalloc with RW shadow. Euh ... Not so big ? Memory: 96448K/131072K available (8016K kernel code, 1680K rwdata , 2720K rodata, 624K init, 4678K bss, 34624K reserved, 0K cma-reserved) Kernel virtual memory layout: * 0xffefc000..0xc000 : fixmap * 0xf7c0..0xffc0 : kasan shadow mem * 0xf7a0..0xf7c0 : consistent mem * 0xf7a0..0xf7a0 : early ioremap * 0xc900..0xf7a0 : vmalloc & ioremap Here, vmalloc area size 0x2ea0, that is 746Mbytes. Shadow for this would be 93Mbytes and we are already using 16Mbytes to shadow the linear memory area. This poor board has 128Mbytes RAM in total. So another solution is needed. Christophe
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
On 2/15/19 11:41 AM, Christophe Leroy wrote: > > > Le 14/02/2019 à 23:04, Daniel Axtens a écrit : >> Hi Christophe, >> >>> --- a/arch/powerpc/include/asm/string.h >>> +++ b/arch/powerpc/include/asm/string.h >>> @@ -27,6 +27,20 @@ extern int memcmp(const void *,const void >>> *,__kernel_size_t); >>> extern void * memchr(const void *,int,__kernel_size_t); >>> extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); >>> +void *__memset(void *s, int c, __kernel_size_t count); >>> +void *__memcpy(void *to, const void *from, __kernel_size_t n); >>> +void *__memmove(void *to, const void *from, __kernel_size_t n); >>> + >>> +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) >>> +/* >>> + * For files that are not instrumented (e.g. mm/slub.c) we >>> + * should use not instrumented version of mem* functions. >>> + */ >>> +#define memcpy(dst, src, len) __memcpy(dst, src, len) >>> +#define memmove(dst, src, len) __memmove(dst, src, len) >>> +#define memset(s, c, n) __memset(s, c, n) >>> +#endif >>> + >> >> I'm finding that I miss tests like 'kasan test: kasan_memcmp >> out-of-bounds in memcmp' because the uninstrumented asm version is used >> instead of an instrumented C version. I ended up guarding the relevant >> __HAVE_ARCH_x symbols behind a #ifndef CONFIG_KASAN and only exporting >> the arch versions if we're not compiled with KASAN. >> >> I find I need to guard and unexport strncpy, strncmp, memchr and >> memcmp. Do you need to do this on 32bit as well, or are those tests >> passing anyway for some reason? > > Indeed, I didn't try the KASAN test module recently, because my configs don't > have CONFIG_MODULE by default. > > Trying to test it now, I am discovering that module loading oopses with > latest version of my series, I need to figure out exactly why. Here below the > oops by modprobing test_module (the one supposed to just say hello to the > world). > > What we see is an access to the RO kasan zero area. 
> > The shadow mem is 0xf7c0..0xffc0 > Linear kernel memory is shadowed by 0xf7c0-0xf8bf > 0xf8c0-0xffc0 is shadowed read only by the kasan zero page. > > Why is kasan trying to access that ? Isn't kasan supposed to not check stuff > in vmalloc area ? It tries to poison global variables in modules. If module is in vmalloc, then it will try to poison vmalloc. Given that the vmalloc area is not so big on 32bits, the easiest solution is to cover all vmalloc with RW shadow.
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
Le 14/02/2019 à 23:04, Daniel Axtens a écrit : Hi Christophe, --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -27,6 +27,20 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); +void *__memset(void *s, int c, __kernel_size_t count); +void *__memcpy(void *to, const void *from, __kernel_size_t n); +void *__memmove(void *to, const void *from, __kernel_size_t n); + +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) +#endif + I'm finding that I miss tests like 'kasan test: kasan_memcmp out-of-bounds in memcmp' because the uninstrumented asm version is used instead of an instrumented C version. I ended up guarding the relevant __HAVE_ARCH_x symbols behind a #ifndef CONFIG_KASAN and only exporting the arch versions if we're not compiled with KASAN. I find I need to guard and unexport strncpy, strncmp, memchr and memcmp. Do you need to do this on 32bit as well, or are those tests passing anyway for some reason? Indeed, I didn't try the KASAN test module recently, because my configs don't have CONFIG_MODULE by default. Trying to test it now, I am discovering that module loading oopses with latest version of my series, I need to figure out exactly why. Here below the oops by modprobing test_module (the one supposed to just say hello to the world). What we see is an access to the RO kasan zero area. The shadow mem is 0xf7c0..0xffc0 Linear kernel memory is shadowed by 0xf7c0-0xf8bf 0xf8c0-0xffc0 is shadowed read only by the kasan zero page. Why is kasan trying to access that ? 
Isn't kasan supposed to not check stuff in vmalloc area ? [ 189.087499] BUG: Unable to handle kernel data access at 0xf8eb7818 [ 189.093455] Faulting instruction address: 0xc001ab60 [ 189.098383] Oops: Kernel access of bad area, sig: 11 [#1] [ 189.103732] BE PAGE_SIZE=16K PREEMPT CMPC885 [ 189.111414] Modules linked in: test_module(+) [ 189.115817] CPU: 0 PID: 514 Comm: modprobe Not tainted 5.0.0-rc5-s3k-dev-00645-g1dd3acf23952 #1016 [ 189.124627] NIP: c001ab60 LR: c0194fe8 CTR: 0003 [ 189.129641] REGS: c5645b90 TRAP: 0300 Not tainted (5.0.0-rc5-s3k-dev-00645-g1dd3acf23952) [ 189.137924] MSR: 9032 CR: 44002884 XER: [ 189.144571] DAR: f8eb7818 DSISR: 8a00 [ 189.144571] GPR00: c0196620 c5645c40 c5aac000 f8eb7818 0003 f8eb7817 c01950d0 [ 189.144571] GPR08: c00c2720 c95bc010 c95bc1a0 c01965ec 100d7b30 c0802b80 c5ae0308 [ 189.144571] GPR16: c5990480 0124 000f c00bcf7c c5ae0324 c95bc32c 06b8 0001 [ 189.144571] GPR24: c95bc364 c95bc360 c95bc2c0 c95bc1a0 0002 0018 f8eb781b [ 189.182611] NIP [c001ab60] __memset+0xb4/0xc0 [ 189.186922] LR [c0194fe8] kasan_unpoison_shadow+0x34/0x54 [ 189.192136] Call Trace: [ 189.194682] [c5645c50] [c0196620] __asan_register_globals+0x34/0x74 [ 189.200900] [c5645c70] [c00c27a4] do_init_module+0xbc/0x5a4 [ 189.206392] [c5645ca0] [c00c0d08] load_module+0x2b5c/0x3194 [ 189.211901] [c5645e70] [c00c14c8] sys_init_module+0x188/0x1bc [ 189.217572] [c5645f40] [c001311c] ret_from_syscall+0x0/0x38 [ 189.223049] --- interrupt: c01 at 0xfda2b50 [ 189.223049] LR = 0x10014b18 [ 189.230175] Instruction dump: [ 189.233117] 4200fffc 70a50003 4d820020 7ca903a6 38c60003 9c860001 4200fffc 4e800020 [ 189.240859] 2c05 4d820020 7ca903a6 38c3 <9c860001> 4200fffc 4e800020 7c032040 [ 189.248809] ---[ end trace 45cbb1b3215e5959 ]--- Christophe Regards, Daniel #ifdef CONFIG_PPC64 #define __HAVE_ARCH_MEMSET32 #define __HAVE_ARCH_MEMSET64 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 879b36602748..fc4c42262694 100644 --- 
a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -16,8 +16,9 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o+= -fPIC endif -CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING +CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -DDISABLE_BRANCH_PROFILING +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -DDISABLE_BRANCH_PROFILING CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) @@ -31,6 +32,10 @@ CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE) endif +KASAN_SANITIZE_early_32.o := n +KASAN_SANITIZE_cputable.o := n +KASAN_SANITIZE_prom_init.o := n + obj-y :=
Re: [PATCH v5 3/3] powerpc/32: Add KASAN support
Hi Christophe, > --- a/arch/powerpc/include/asm/string.h > +++ b/arch/powerpc/include/asm/string.h > @@ -27,6 +27,20 @@ extern int memcmp(const void *,const void > *,__kernel_size_t); > extern void * memchr(const void *,int,__kernel_size_t); > extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); > > +void *__memset(void *s, int c, __kernel_size_t count); > +void *__memcpy(void *to, const void *from, __kernel_size_t n); > +void *__memmove(void *to, const void *from, __kernel_size_t n); > + > +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) > +/* > + * For files that are not instrumented (e.g. mm/slub.c) we > + * should use not instrumented version of mem* functions. > + */ > +#define memcpy(dst, src, len) __memcpy(dst, src, len) > +#define memmove(dst, src, len) __memmove(dst, src, len) > +#define memset(s, c, n) __memset(s, c, n) > +#endif > + I'm finding that I miss tests like 'kasan test: kasan_memcmp out-of-bounds in memcmp' because the uninstrumented asm version is used instead of an instrumented C version. I ended up guarding the relevant __HAVE_ARCH_x symbols behind a #ifndef CONFIG_KASAN and only exporting the arch versions if we're not compiled with KASAN. I find I need to guard and unexport strncpy, strncmp, memchr and memcmp. Do you need to do this on 32bit as well, or are those tests passing anyway for some reason? 
Regards, Daniel > #ifdef CONFIG_PPC64 > #define __HAVE_ARCH_MEMSET32 > #define __HAVE_ARCH_MEMSET64 > diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile > index 879b36602748..fc4c42262694 100644 > --- a/arch/powerpc/kernel/Makefile > +++ b/arch/powerpc/kernel/Makefile > @@ -16,8 +16,9 @@ CFLAGS_prom_init.o += -fPIC > CFLAGS_btext.o += -fPIC > endif > > -CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > +CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING > +CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -DDISABLE_BRANCH_PROFILING > +CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > -DDISABLE_BRANCH_PROFILING > CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) > > @@ -31,6 +32,10 @@ CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE) > CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE) > endif > > +KASAN_SANITIZE_early_32.o := n > +KASAN_SANITIZE_cputable.o := n > +KASAN_SANITIZE_prom_init.o := n > + > obj-y:= cputable.o ptrace.o syscalls.o \ > irq.o align.o signal_32.o pmc.o vdso.o \ > process.o systbl.o idle.o \ > diff --git a/arch/powerpc/kernel/asm-offsets.c > b/arch/powerpc/kernel/asm-offsets.c > index 9ffc72ded73a..846fb30b1190 100644 > --- a/arch/powerpc/kernel/asm-offsets.c > +++ b/arch/powerpc/kernel/asm-offsets.c > @@ -783,5 +783,9 @@ int main(void) > DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE)); > #endif > > +#ifdef CONFIG_KASAN > + DEFINE(KASAN_SHADOW_OFFSET, KASAN_SHADOW_OFFSET); > +#endif > + > return 0; > } > diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S > index 05b08db3901d..0ec9dec06bc2 100644 > --- a/arch/powerpc/kernel/head_32.S > +++ b/arch/powerpc/kernel/head_32.S > @@ -962,6 +962,9 @@ start_here: > * Do early platform-specific initialization, > * and set up the MMU. 
> */ > +#ifdef CONFIG_KASAN > + bl kasan_early_init > +#endif > li r3,0 > mr r4,r31 > bl machine_init > diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S > index b19d78410511..5d6ff8fa7e2b 100644 > --- a/arch/powerpc/kernel/head_40x.S > +++ b/arch/powerpc/kernel/head_40x.S > @@ -848,6 +848,9 @@ start_here: > /* > * Decide what sort of machine this is and initialize the MMU. > */ > +#ifdef CONFIG_KASAN > + bl kasan_early_init > +#endif > li r3,0 > mr r4,r31 > bl machine_init > diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S > index bf23c19c92d6..7ca14dff6192 100644 > --- a/arch/powerpc/kernel/head_44x.S > +++ b/arch/powerpc/kernel/head_44x.S > @@ -203,6 +203,9 @@ _ENTRY(_start); > /* > * Decide what sort of machine this is and initialize the MMU. > */ > +#ifdef CONFIG_KASAN > + bl kasan_early_init > +#endif > li r3,0 > mr r4,r31 > bl machine_init > diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S > index 0fea10491f3a..6a644ea2e6b6 100644 > --- a/arch/powerpc/kernel/head_8xx.S > +++ b/arch/powerpc/kernel/head_8xx.S > @@ -823,6 +823,9 @@ start_here: > /* > * Decide what sort of machine this is and initialize the MMU. > */ > +#ifdef CONFIG_KASAN > + bl kasan_early_init > +#endif > li r3,0 > mr r4,r31 > bl machine_init > diff --git a/arch/powerpc/kernel/head_fsl_booke.S >
[PATCH v5 3/3] powerpc/32: Add KASAN support
This patch adds KASAN support for PPC32. The KASAN shadow area is located between the vmalloc area and the fixmap area. KASAN_SHADOW_OFFSET is calculated in asm/kasan.h and extracted by Makefile prepare rule via asm-offsets.h Note that on book3s it will only work on the 603 because the other ones use hash table and can therefore not share a single PTE table covering the entire early KASAN shadow area. Signed-off-by: Christophe Leroy --- arch/powerpc/Kconfig | 1 + arch/powerpc/Makefile| 7 ++ arch/powerpc/include/asm/book3s/32/pgtable.h | 2 + arch/powerpc/include/asm/kasan.h | 24 ++ arch/powerpc/include/asm/nohash/32/pgtable.h | 2 + arch/powerpc/include/asm/ppc_asm.h | 4 + arch/powerpc/include/asm/setup.h | 5 ++ arch/powerpc/include/asm/string.h| 14 arch/powerpc/kernel/Makefile | 9 ++- arch/powerpc/kernel/asm-offsets.c| 4 + arch/powerpc/kernel/head_32.S| 3 + arch/powerpc/kernel/head_40x.S | 3 + arch/powerpc/kernel/head_44x.S | 3 + arch/powerpc/kernel/head_8xx.S | 3 + arch/powerpc/kernel/head_fsl_booke.S | 3 + arch/powerpc/kernel/prom_init_check.sh | 10 ++- arch/powerpc/kernel/setup-common.c | 2 + arch/powerpc/lib/Makefile| 8 ++ arch/powerpc/lib/copy_32.S | 9 ++- arch/powerpc/mm/Makefile | 3 + arch/powerpc/mm/dump_linuxpagetables.c | 8 ++ arch/powerpc/mm/kasan_init.c | 114 +++ arch/powerpc/mm/mem.c| 4 + 23 files changed, 239 insertions(+), 6 deletions(-) create mode 100644 arch/powerpc/include/asm/kasan.h create mode 100644 arch/powerpc/mm/kasan_init.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 08908219fba9..850b06def84f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -175,6 +175,7 @@ config PPC select GENERIC_TIME_VSYSCALL select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KASAN if PPC32 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index ac033341ed55..f0738099e31e 100644 --- 
a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -427,6 +427,13 @@ else endif endif +ifdef CONFIG_KASAN +prepare: kasan_prepare + +kasan_prepare: prepare0 + $(eval KASAN_SHADOW_OFFSET = $(shell awk '{if ($$2 == "KASAN_SHADOW_OFFSET") print $$3;}' include/generated/asm-offsets.h)) +endif + # Check toolchain versions: # - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 49d76adb9bc5..4543016f80ca 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -141,6 +141,8 @@ static inline bool pte_user(pte_t pte) */ #ifdef CONFIG_HIGHMEM #define KVIRT_TOP PKMAP_BASE +#elif defined(CONFIG_KASAN) +#define KVIRT_TOP KASAN_SHADOW_START #else #define KVIRT_TOP (0xfe00UL) /* for now, could be FIXMAP_BASE ? */ #endif diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h new file mode 100644 index ..5d0088429b62 --- /dev/null +++ b/arch/powerpc/include/asm/kasan.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +#define KASAN_SHADOW_SCALE_SHIFT 3 +#define KASAN_SHADOW_SIZE ((~0UL - PAGE_OFFSET + 1) >> KASAN_SHADOW_SCALE_SHIFT) + +#define KASAN_SHADOW_START (ALIGN_DOWN(FIXADDR_START - KASAN_SHADOW_SIZE, \ + PGDIR_SIZE)) +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) +#define KASAN_SHADOW_OFFSET(KASAN_SHADOW_START - \ +(PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT)) + +void kasan_early_init(void); +void kasan_init(void); + +#endif +#endif diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index bed433358260..b3b52f02be1a 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -71,6 +71,8 @@ extern int icache_44x_need_flush; */ 
#ifdef CONFIG_HIGHMEM #define KVIRT_TOP PKMAP_BASE +#elif defined(CONFIG_KASAN) +#define KVIRT_TOP KASAN_SHADOW_START #else #define KVIRT_TOP (0xfe00UL) /* for now, could be FIXMAP_BASE ? */ #endif diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index e0637730a8e7..dba2c1038363 100644 ---