PASEMI: PCI_SCAN_ALL_PCIE_DEVS
Hi All, Could you please add Olof's patch. Without this patch, we have to always add 'pci=pcie_scan_all' to the kernel boot arguments. Please add it. Olof's patch for P.A. Semi boards: --- arch/powerpc/platforms/pasemi/pci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index 5ff6108..ea54ed2 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -224,6 +224,8 @@ void __init pas_pci_init(void) return; } + pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS); + for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np)) of_node_get(np); --- Thanks, Christian
[PATCHv4 3/3] ppc64 boot: Wait for boot cpu to show up if nr_cpus limit is about to hit.
From: Mahesh Salgaonkar. The kernel boot parameter 'nr_cpus=' allows one to specify number of possible cpus in the system. In the normal scenario the first cpu (cpu0) that shows up is the boot cpu and hence it gets covered under nr_cpus limit. But this assumption will be broken in kdump scenario where kdump kernel after a crash can boot up on a non-zero boot cpu. The paca structure allocation depends on value of nr_cpus and is indexed using logical cpu ids. This definitely will be an issue if boot cpu id > nr_cpus. This patch modifies allocate_pacas() and smp_setup_cpu_maps() to accommodate boot cpu for the case where boot_cpuid > nr_cpu_ids. This change would help to reduce the memory reservation requirement for kdump on ppc64. Signed-off-by: Mahesh Salgaonkar Signed-off-by: Thadeu Lima de Souza Cascardo Tested-by: Guilherme G. Piccoli Signed-off-by: Pingfan Liu (separate the logical for cpu id mapping) Signed-off-by: Pingfan Liu --- arch/powerpc/include/asm/paca.h | 3 +++ arch/powerpc/kernel/paca.c | 19 ++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b62c310..49ab29d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -49,6 +49,9 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */ #define get_lppaca() (get_paca()->lppaca_ptr) #define get_slb_shadow() (get_paca()->slb_shadow_ptr) +/* Maximum number of threads per core. 
 */ +#define MAX_SMT 8 + struct task_struct; /* diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 95ffedf..13be6ab 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -209,6 +209,7 @@ void __init allocate_pacas(void) { u64 limit; int cpu; + unsigned int nr_cpus_aligned; #ifdef CONFIG_PPC_BOOK3S_64 /* @@ -220,20 +221,28 @@ void __init allocate_pacas(void) limit = ppc64_rma_size; #endif - paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); + /* +* Alloc the paca[] align up to SMT threads. +* This will help us to prepare for a situation where +* boot cpu id > nr_cpus. +* We keep the schema of nr_cpus in kernel cmdline, but +* waste a bit memory +*/ + nr_cpus_aligned = _ALIGN_UP(nr_cpu_ids, MAX_SMT); + paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus_aligned); paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); memset(paca, 0, paca_size); printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n", - paca_size, nr_cpu_ids, paca); + paca_size, nr_cpus_aligned, paca); - allocate_lppacas(nr_cpu_ids, limit); + allocate_lppacas(nr_cpus_aligned, limit); - allocate_slb_shadows(nr_cpu_ids, limit); + allocate_slb_shadows(nr_cpus_aligned, limit); /* Can't use for_each_*_cpu, as they aren't functional yet */ - for (cpu = 0; cpu < nr_cpu_ids; cpu++) + for (cpu = 0; cpu < nr_cpus_aligned; cpu++) initialise_paca(&paca[cpu], cpu); } -- 2.7.4
[PATCHv4 2/3] powerpc, cpu: handling the special case when boot_cpuid greater than nr_cpus
For kexec -p, after boot_cpuid is mapping into the range of [0, threads_per_core), then if nr_cpus is small, we will have the bitmap [0,..., nr_cpus, ..., boot_cpuid, ...). This patch chooses cpus inside the range of [boot_cpuid - nr_cpus +1, ..., boot_cpuid] to be online. With this patch and the next, on a P9 machine with thread_per_core=4, and set nr_cpus=2 for the crash kernel. After taskset -c 11 sh -c "echo c > /proc/sysrq-trigger" Then kdump:/sys/devices/system/cpu# cat possible 2-3 kdump:/sys/devices/system/cpu# cat present 2-3 kdump:/sys/devices/system/cpu# cat online 2-3 Signed-off-by: Pingfan Liu --- arch/powerpc/kernel/setup-common.c | 14 -- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 1a67344..6920b5e 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -462,6 +462,8 @@ void __init smp_setup_cpu_maps(void) struct device_node *dn; struct device_node *boot_dn = NULL; bool handling_bootdn = true; + int head_thread = 0; + int online_cnt = 0; int cpu = 0; int nthreads = 1; @@ -499,13 +501,19 @@ void __init smp_setup_cpu_maps(void) if (boot_cpuid < nthreads && be32_to_cpu(intserv[boot_cpuid]) == boot_cpuhwid) { boot_dn = dn; + /* choose a bunch of continuous threads */ + if (boot_cpuid > nr_cpu_ids - 1) { + head_thread = boot_cpuid - nr_cpu_ids + 1; + /* keep the mapping of logical and thread */ + cpu = head_thread; + } } if (boot_dn == NULL) continue; } else if (dn == boot_dn) continue; - for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { + for (j = head_thread; j < nthreads && online_cnt < nr_cpu_ids; j++) { bool avail; DBG("thread %d -> cpu %d (hard id %d)\n", @@ -520,13 +528,15 @@ void __init smp_setup_cpu_maps(void) set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); set_cpu_possible(cpu, true); cpu++; + online_cnt++; } - if (cpu >= nr_cpu_ids) { + if (online_cnt >= nr_cpu_ids) { of_node_put(dn); break; } if 
(handling_bootdn) { + head_thread = 0; handling_bootdn = false; goto again; } -- 2.7.4
[PATCHv4 1/3] powerpc, cpu: partially unbind the mapping between cpu logical id and its seq in dt
For kexec -p, the boot cpu can be not the cpu0, this causes the problem to alloc paca[]. In theory, there is no requirement to assign cpu's logical id as its present seq by device tree. But we have something like cpu_first_thread_sibling(), which makes assumption on the mapping inside a core. Hence partially changing the mapping, i.e. unbind the mapping of core while keep the mapping inside a core. After this patch, boot-cpu will always be mapped into the range [0,threads_per_core). Signed-off-by: Pingfan Liu--- arch/powerpc/include/asm/smp.h | 1 + arch/powerpc/kernel/prom.c | 25 ++--- arch/powerpc/kernel/setup-common.c | 21 + 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index fac963e..1299100 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -30,6 +30,7 @@ #include extern int boot_cpuid; +extern int boot_cpuhwid; extern int spinning_secondaries; extern void cpu_die(void); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index da67606..d0ebb25 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -315,8 +315,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, const __be32 *intserv; int i, nthreads; int len; - int found = -1; - int found_thread = 0; + bool found = false; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) @@ -341,8 +340,11 @@ static int __init early_init_dt_scan_cpus(unsigned long node, if (fdt_version(initial_boot_params) >= 2) { if (be32_to_cpu(intserv[i]) == fdt_boot_cpuid_phys(initial_boot_params)) { - found = boot_cpu_count; - found_thread = i; + /* always map the boot-cpu logical id into the +* the range of [0, thread_per_core) +*/ + boot_cpuid = i; + found = true; } } else { /* @@ -351,8 +353,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, * off secondary threads. 
*/ if (of_get_flat_dt_prop(node, - "linux,boot-cpu", NULL) != NULL) - found = boot_cpu_count; + "linux,boot-cpu", NULL) != NULL) { + boot_cpuid = i; + found = true; + } } #ifdef CONFIG_SMP /* logical cpu id is always 0 on UP kernels */ @@ -361,13 +365,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node, } /* Not the boot CPU */ - if (found < 0) + if (!found) return 0; - DBG("boot cpu: logical %d physical %d\n", found, - be32_to_cpu(intserv[found_thread])); - boot_cpuid = found; - set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread])); + boot_cpuhwid = be32_to_cpu(intserv[boot_cpuid]); + DBG("boot cpu: logical %d physical %d\n", boot_cpuid, boot_cpuhwid); + set_hard_smp_processor_id(boot_cpuid, boot_cpuhwid); /* * PAPR defines "logical" PVR values for cpus that diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 66f7cc6..1a67344 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -86,6 +86,7 @@ struct machdep_calls *machine_id; EXPORT_SYMBOL(machine_id); int boot_cpuid = -1; +int boot_cpuhwid = -1; EXPORT_SYMBOL_GPL(boot_cpuid); /* @@ -459,11 +460,17 @@ static void __init cpu_init_thread_core_maps(int tpc) void __init smp_setup_cpu_maps(void) { struct device_node *dn; + struct device_node *boot_dn = NULL; + bool handling_bootdn = true; int cpu = 0; int nthreads = 1; DBG("smp_setup_cpu_maps()\n"); +again: + /* E.g. kexec will not boot from the 1st core. So firstly loop to find out +* the dn of boot-cpu, and map them onto [0, nthreads) +*/ for_each_node_by_type(dn, "cpu") { const __be32 *intserv; __be32 cpu_be; @@ -488,6 +495,16 @@ void __init smp_setup_cpu_maps(void) nthreads = len / sizeof(int); + if (handling_bootdn) { + if (boot_cpuid < nthreads && + be32_to_cpu(intserv[boot_cpuid]) == boot_cpuhwid) { + boot_dn = dn; + } +
[PATCHv4 0/3] enable nr_cpus for powerpc
This topic has a very long history. It comes from Mahesh Salgaonkar. For v3: https://patchwork.ozlabs.org/patch/834860/ In this series, I separate and change the mapping between cpu logical id and hwid. I hope we can acquire it for "kexec -p" Mahesh Salgaonkar (1): ppc64 boot: Wait for boot cpu to show up if nr_cpus limit is about to hit. Pingfan Liu (2): powerpc, cpu: partially unbind the mapping between cpu logical id and its seq in dt powerpc, cpu: handling the special case when boot_cpuid greater than nr_cpus arch/powerpc/include/asm/paca.h| 3 +++ arch/powerpc/include/asm/smp.h | 1 + arch/powerpc/kernel/paca.c | 19 ++- arch/powerpc/kernel/prom.c | 25 ++--- arch/powerpc/kernel/setup-common.c | 35 +-- 5 files changed, 65 insertions(+), 18 deletions(-) -- 2.7.4
Re: [PATCH v8 2/6] module: allow symbol exports to be disabled
On Sun, 11 Mar 2018, Ard Biesheuvel wrote: > To allow existing C code to be incorporated into the decompressor or > the UEFI stub, introduce a CPP macro that turns all EXPORT_SYMBOL_xxx > declarations into nops, and #define it in places where such exports > are undesirable. Note that this gets rid of a rather dodgy redefine > of linux/export.h's header guard. > > Signed-off-by: Ard BiesheuvelAcked-by: Nicolas Pitre > --- > arch/x86/boot/compressed/kaslr.c | 5 + > drivers/firmware/efi/libstub/Makefile | 3 ++- > include/linux/export.h| 11 ++- > 3 files changed, 13 insertions(+), 6 deletions(-) > > diff --git a/arch/x86/boot/compressed/kaslr.c > b/arch/x86/boot/compressed/kaslr.c > index 8199a6187251..3a2a6d7049e4 100644 > --- a/arch/x86/boot/compressed/kaslr.c > +++ b/arch/x86/boot/compressed/kaslr.c > @@ -23,11 +23,8 @@ > * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. > * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL > * which is meaningless and will cause compiling error in some cases. > - * So do not include linux/export.h and define EXPORT_SYMBOL(sym) > - * as empty. 
> */ > -#define _LINUX_EXPORT_H > -#define EXPORT_SYMBOL(sym) > +#define __DISABLE_EXPORTS > > #include "misc.h" > #include "error.h" > diff --git a/drivers/firmware/efi/libstub/Makefile > b/drivers/firmware/efi/libstub/Makefile > index 7b3ba40f0745..896a882c89f4 100644 > --- a/drivers/firmware/efi/libstub/Makefile > +++ b/drivers/firmware/efi/libstub/Makefile > @@ -20,7 +20,8 @@ cflags-$(CONFIG_EFI_ARMSTUB)+= > -I$(srctree)/scripts/dtc/libfdt > KBUILD_CFLAGS:= $(cflags-y) > -DDISABLE_BRANCH_PROFILING \ > -D__NO_FORTIFY \ > $(call cc-option,-ffreestanding) \ > -$(call cc-option,-fno-stack-protector) > +$(call cc-option,-fno-stack-protector) \ > +-D__DISABLE_EXPORTS > > GCOV_PROFILE := n > KASAN_SANITIZE := n > diff --git a/include/linux/export.h b/include/linux/export.h > index 1a1dfdb2a5c6..25005b55b079 100644 > --- a/include/linux/export.h > +++ b/include/linux/export.h > @@ -72,7 +72,16 @@ extern struct module __this_module; > __attribute__((section("___ksymtab" sec "+" #sym), used)) \ > = { (unsigned long), __kstrtab_##sym } > > -#if defined(__KSYM_DEPS__) > +#if defined(__DISABLE_EXPORTS) > + > +/* > + * Allow symbol exports to be disabled completely so that C code may > + * be reused in other execution contexts such as the UEFI stub or the > + * decompressor. > + */ > +#define __EXPORT_SYMBOL(sym, sec) > + > +#elif defined(__KSYM_DEPS__) > > /* > * For fine grained build dependencies, we want to tell the build system > -- > 2.15.1 > >
Re: [PATCH v8 1/6] arch: enable relative relocations for arm64, power and x86
On Sun, Mar 11, 2018 at 1:35 PM, Ard Biesheuvel wrote: > > I'm sure all of these architectures define some kind of 32-bit place > relative relocation in their ELF psABI, and I see how it would be > cleaner to change everything at once, but I anticipate a long tail of > issues with toolchains for niche architectures that I have no way to > test. Ok, fair enough. Linus
Re: [PATCH v8 1/6] arch: enable relative relocations for arm64, power and x86
On 11 March 2018 at 20:20, Linus Torvaldswrote: > On Sun, Mar 11, 2018 at 5:38 AM, Ard Biesheuvel > wrote: >> Before updating certain subsystems to use place relative 32-bit >> relocations in special sections, to save space and reduce the >> number of absolute relocations that need to be processed at runtime >> by relocatable kernels, introduce the Kconfig symbol and define it >> for some architectures that should be able to support and benefit >> from it. > > Are there actually architectures hat _don't_ support those relative > 32-bit relocations? That really feels pretty fundamental. > I started out doing all of them, and I don't remember the exact list, but there are quite a few architectures that don't support these relocations in their module loaders, and in fact, not even ARM does (as one of the 'big' architectures). This is not really surprising, given that the C compiler never emits such relative references. > I would have expected all of them to do it - is your selection of > arm64/powerpc/x86 perhaps more about "I have tested these" than about > anything else? > > Because I'd almost prefer to just do the switch-over unconditionally > if that is at all possible? > arm64, powerpc and x86 implement CONFIG_RELOCATABLE, and so they benefit more than other architectures, because of the fact that the relocation metadata for these data structures can be dropped entirely. Other 64-bit architectures only have the 50% size reduction benefit, and 32-bit architectures have no benefit at all. I'm sure all of these architectures define some kind of 32-bit place relative relocation in their ELF psABI, and I see how it would be cleaner to change everything at once, but I anticipate a long tail of issues with toolchains for niche architectures that I have no way to test.
Re: [PATCH v8 1/6] arch: enable relative relocations for arm64, power and x86
On Sun, Mar 11, 2018 at 5:38 AM, Ard Biesheuvel wrote: > Before updating certain subsystems to use place relative 32-bit > relocations in special sections, to save space and reduce the > number of absolute relocations that need to be processed at runtime > by relocatable kernels, introduce the Kconfig symbol and define it > for some architectures that should be able to support and benefit > from it. Are there actually architectures that _don't_ support those relative 32-bit relocations? That really feels pretty fundamental. I would have expected all of them to do it - is your selection of arm64/powerpc/x86 perhaps more about "I have tested these" than about anything else? Because I'd almost prefer to just do the switch-over unconditionally if that is at all possible? Linus
Re: [PATCH v8 0/6] add support for relative references in special sections
On Sun, Mar 11, 2018 at 5:44 AM, Ard Biesheuvel wrote: > On 11 March 2018 at 12:38, Ard Biesheuvel wrote: >> Cc: James Morris > > Note when replying: this ^^^ email address no longer works. James updated MAINTAINERS to: James Morris -- Kees Cook Pixel Security
Re: [PATCH v8 3/6] module: use relative references for __ksymtab entries
On 11 March 2018 at 12:38, Ard Biesheuvelwrote: > An ordinary arm64 defconfig build has ~64 KB worth of __ksymtab > entries, each consisting of two 64-bit fields containing absolute > references, to the symbol itself and to a char array containing > its name, respectively. > > When we build the same configuration with KASLR enabled, we end > up with an additional ~192 KB of relocations in the .init section, > i.e., one 24 byte entry for each absolute reference, which all need > to be processed at boot time. > > Given how the struct kernel_symbol that describes each entry is > completely local to module.c (except for the references emitted > by EXPORT_SYMBOL() itself), we can easily modify it to contain > two 32-bit relative references instead. This reduces the size of > the __ksymtab section by 50% for all 64-bit architectures, and > gets rid of the runtime relocations entirely for architectures > implementing KASLR, either via standard PIE linking (arm64) or > using custom host tools (x86). > > Note that the binary search involving __ksymtab contents relies > on each section being sorted by symbol name. This is implemented > based on the input section names, not the names in the ksymtab > entries, so this patch does not interfere with that. > > Given that the use of place-relative relocations requires support > both in the toolchain and in the module loader, we cannot enable > this feature for all architectures. So make it dependent on whether > CONFIG_HAVE_ARCH_PREL32_RELOCATIONS is defined. > > Cc: Arnd Bergmann > Cc: Andrew Morton > Cc: Ingo Molnar > Cc: Kees Cook > Cc: Thomas Garnier > Cc: Nicolas Pitre > Acked-by: Jessica Yu > Signed-off-by: Ard Biesheuvel > --- > arch/x86/include/asm/Kbuild | 1 + > arch/x86/include/asm/export.h | 5 --- > include/asm-generic/export.h | 12 - > include/linux/compiler.h | 19 > include/linux/export.h| 46 +++- > kernel/module.c | 32 +++--- > 6 files changed, 91 insertions(+), 24 deletions(-) > ... 
> diff --git a/include/linux/compiler.h b/include/linux/compiler.h > index ab4711c63601..0a9328ea9dbd 100644 > --- a/include/linux/compiler.h > +++ b/include/linux/compiler.h > @@ -280,6 +280,25 @@ unsigned long read_word_at_a_time(const void *addr) > > #endif /* __KERNEL__ */ > > +/* > + * Force the compiler to emit 'sym' as a symbol, so that we can reference > + * it from inline assembler. Necessary in case 'sym' could be inlined > + * otherwise, or eliminated entirely due to lack of references that are > + * visible to the compiler. > + */ > +#define __ADDRESSABLE(sym) \ > + static void * const __attribute__((section(".discard"), used)) \ > + __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; > + kernelci.org tells me that I need to drop the 'const' here, or we may end up with .discard sections with conflicting attributes (r/o vs r/w) in some cases (CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y)
Re: [PATCH v8 0/6] add support for relative references in special sections
On 11 March 2018 at 12:38, Ard Biesheuvelwrote: ... > Cc: Arnd Bergmann > Cc: Kees Cook > Cc: Will Deacon > Cc: Michael Ellerman > Cc: Thomas Garnier > Cc: Thomas Gleixner > Cc: "Serge E. Hallyn" > Cc: Bjorn Helgaas > Cc: Benjamin Herrenschmidt > Cc: Russell King > Cc: Paul Mackerras > Cc: Catalin Marinas > Cc: Petr Mladek > Cc: Ingo Molnar > Cc: James Morris Note when replying: this ^^^ email address no longer works. > Cc: Andrew Morton > Cc: Nicolas Pitre > Cc: Josh Poimboeuf > Cc: Steven Rostedt > Cc: Sergey Senozhatsky > Cc: Linus Torvalds > Cc: Jessica Yu > > Cc: linux-arm-ker...@lists.infradead.org > Cc: linux-ker...@vger.kernel.org > Cc: linuxppc-dev@lists.ozlabs.org > Cc: x...@kernel.org > ...
[PATCH v8 6/6] kernel: tracepoints: add support for relative references
To avoid the need for relocating absolute references to tracepoint structures at boot time when running relocatable kernels (which may take a disproportionate amount of space), add the option to emit these tables as relative references instead. Cc: Ingo MolnarAcked-by: Steven Rostedt (VMware) Signed-off-by: Ard Biesheuvel --- include/linux/tracepoint.h | 19 ++-- kernel/tracepoint.c| 49 +++- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index c94f466d57ef..cdb0a9461d71 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -225,6 +225,19 @@ extern void syscall_unregfunc(void); return static_key_false(&__tracepoint_##name.key); \ } +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define __TRACEPOINT_ENTRY(name)\ + asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ + " .balign 4\n" \ + " .long " VMLINUX_SYMBOL_STR(__tracepoint_##name) " - .\n" \ + " .previous\n") +#else +#define __TRACEPOINT_ENTRY(name)\ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ + &__tracepoint_##name +#endif + /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that will be used for iteration @@ -234,11 +247,9 @@ extern void syscall_unregfunc(void); static const char __tpstrtab_##name[]\ __attribute__((section("__tracepoints_strings"))) = #name; \ struct tracepoint __tracepoint_##name\ - __attribute__((section("__tracepoints"))) = \ + __attribute__((section("__tracepoints"), used)) =\ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name; + __TRACEPOINT_ENTRY(name); #define DEFINE_TRACE(name) \ DEFINE_TRACE_FN(name, NULL, NULL); diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 
671b13457387..4ce8cc4bf2c3 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -326,6 +326,27 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); +static void for_each_tracepoint_range(struct tracepoint * const *begin, + struct tracepoint * const *end, + void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + if (!begin) + return; + + if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) { + const int *iter; + + for (iter = (const int *)begin; iter < (const int *)end; iter++) + fct(offset_to_ptr(iter), priv); + } else { + struct tracepoint * const *iter; + + for (iter = begin; iter < end; iter++) + fct(*iter, priv); + } +} + #ifdef CONFIG_MODULES bool trace_module_has_bad_taint(struct module *mod) { @@ -390,15 +411,9 @@ EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); * Ensure the tracer unregistered the module's probes before the module * teardown is performed. Prevents leaks of probe and data pointers. */ -static void tp_module_going_check_quiescent(struct tracepoint * const *begin, - struct tracepoint * const *end) +static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv) { - struct tracepoint * const *iter; - - if (!begin) - return; - for (iter = begin; iter < end; iter++) - WARN_ON_ONCE((*iter)->funcs); + WARN_ON_ONCE(tp->funcs); } static int tracepoint_module_coming(struct module *mod) @@ -449,8 +464,9 @@ static void tracepoint_module_going(struct module *mod) * Called the going notifier before checking for * quiescence. */ - tp_module_going_check_quiescent(mod->tracepoints_ptrs, - mod->tracepoints_ptrs + mod->num_tracepoints); + for_each_tracepoint_range(mod->tracepoints_ptrs, + mod->tracepoints_ptrs + mod->num_tracepoints, + tp_module_going_check_quiescent, NULL); break;
[PATCH v8 5/6] PCI: Add support for relative addressing in quirk tables
Allow the PCI quirk tables to be emitted in a way that avoids absolute references to the hook functions. This reduces the size of the entries, and, more importantly, makes them invariant under runtime relocation (e.g., for KASLR) Acked-by: Bjorn Helgaas Signed-off-by: Ard Biesheuvel --- drivers/pci/quirks.c | 12 +--- include/linux/pci.h | 20 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8b14bd326d4a..24abfaccd2a0 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3563,9 +3563,15 @@ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, f->vendor == (u16) PCI_ANY_ID) && (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { - calltime = fixup_debug_start(dev, f->hook); - f->hook(dev); - fixup_debug_report(dev, calltime, f->hook); + void (*hook)(struct pci_dev *dev); +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + hook = offset_to_ptr(&f->hook_offset); +#else + hook = f->hook; +#endif + calltime = fixup_debug_start(dev, hook); + hook(dev); + fixup_debug_report(dev, calltime, hook); } } diff --git a/include/linux/pci.h b/include/linux/pci.h index 024a1beda008..765044bb9e8e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1790,7 +1790,11 @@ struct pci_fixup { u16 device; /* Or PCI_ANY_ID */ u32 class; /* Or PCI_ANY_ID */ unsigned int class_shift; /* should be 0, 8, 16 */ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + int hook_offset; +#else void (*hook)(struct pci_dev *dev); +#endif }; enum pci_fixup_pass { @@ -1804,12 +1808,28 @@ enum pci_fixup_pass { pci_fixup_suspend_late, /* pci_device_suspend_late() */ }; +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) \ + __ADDRESSABLE(hook) \ + asm(".section " #sec ", \"a\" \n" \ + ".balign 16 \n" \ + ".short " #vendor ", " #device " \n" \ + ".long "#class ", " #class_shift " \n" \ + ".long "VMLINUX_SYMBOL_STR(hook) " - . 
\n" \ + ".previous \n"); +#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,\ + class_shift, hook)\ + __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) +#else /* Anonymous variables would be nice... */ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ class_shift, hook)\ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ __attribute__((__section__(#section), aligned((sizeof(void *)\ = { vendor, device, class, class_shift, hook }; +#endif #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ class_shift, hook) \ -- 2.15.1
[PATCH v8 4/6] init: allow initcall tables to be emitted using relative references
Allow the initcall tables to be emitted using relative references that are only half the size on 64-bit architectures and don't require fixups at runtime on relocatable kernels. Cc: Petr MladekCc: Sergey Senozhatsky Cc: Steven Rostedt Cc: James Morris Cc: "Serge E. Hallyn" Signed-off-by: Ard Biesheuvel --- include/linux/init.h | 44 +++- init/main.c| 32 +++--- kernel/printk/printk.c | 4 +- security/security.c| 4 +- 4 files changed, 53 insertions(+), 31 deletions(-) diff --git a/include/linux/init.h b/include/linux/init.h index 506a98151131..cca0f76bf4a6 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -116,8 +116,24 @@ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); -extern initcall_t __con_initcall_start[], __con_initcall_end[]; -extern initcall_t __security_initcall_start[], __security_initcall_end[]; +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +typedef int initcall_entry_t; + +static inline initcall_t initcall_from_entry(initcall_entry_t *entry) +{ + return offset_to_ptr(entry); +} +#else +typedef initcall_t initcall_entry_t; + +static inline initcall_t initcall_from_entry(initcall_entry_t *entry) +{ + return *entry; +} +#endif + +extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; +extern initcall_entry_t __security_initcall_start[], __security_initcall_end[]; /* Used for contructor calls. */ typedef void (*ctor_fn_t)(void); @@ -167,9 +183,20 @@ extern bool initcall_debug; * as KEEP() in the linker script. 
*/ -#define __define_initcall(fn, id) \ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define ___define_initcall(fn, id, __sec) \ + __ADDRESSABLE(fn) \ + asm(".section \"" #__sec ".init\", \"a\" \n" \ + "__initcall_" #fn #id ":\n" \ + ".long "VMLINUX_SYMBOL_STR(fn) " - .\n" \ + ".previous \n"); +#else +#define ___define_initcall(fn, id, __sec) \ static initcall_t __initcall_##fn##id __used \ - __attribute__((__section__(".initcall" #id ".init"))) = fn; + __attribute__((__section__(#__sec ".init"))) = fn; +#endif + +#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) /* * Early initcalls run before initializing SMP. @@ -208,13 +235,8 @@ extern bool initcall_debug; #define __exitcall(fn) \ static exitcall_t __exitcall_##fn __exit_call = fn -#define console_initcall(fn) \ - static initcall_t __initcall_##fn \ - __used __section(.con_initcall.init) = fn - -#define security_initcall(fn) \ - static initcall_t __initcall_##fn \ - __used __section(.security_initcall.init) = fn +#define console_initcall(fn) ___define_initcall(fn,, .con_initcall) +#define security_initcall(fn) ___define_initcall(fn,, .security_initcall) struct obs_kernel_param { const char *str; diff --git a/init/main.c b/init/main.c index a8100b954839..d81487cc126d 100644 --- a/init/main.c +++ b/init/main.c @@ -848,18 +848,18 @@ int __init_or_module do_one_initcall(initcall_t fn) } -extern initcall_t __initcall_start[]; -extern initcall_t __initcall0_start[]; -extern initcall_t __initcall1_start[]; -extern initcall_t __initcall2_start[]; -extern initcall_t __initcall3_start[]; -extern initcall_t __initcall4_start[]; -extern initcall_t __initcall5_start[]; -extern initcall_t __initcall6_start[]; -extern initcall_t __initcall7_start[]; -extern initcall_t __initcall_end[]; - -static initcall_t *initcall_levels[] __initdata = { +extern initcall_entry_t __initcall_start[]; +extern initcall_entry_t __initcall0_start[]; +extern initcall_entry_t __initcall1_start[]; +extern initcall_entry_t 
__initcall2_start[]; +extern initcall_entry_t __initcall3_start[]; +extern initcall_entry_t __initcall4_start[]; +extern initcall_entry_t __initcall5_start[]; +extern initcall_entry_t __initcall6_start[]; +extern initcall_entry_t __initcall7_start[]; +extern initcall_entry_t __initcall_end[]; + +static initcall_entry_t *initcall_levels[] __initdata = { __initcall0_start, __initcall1_start, __initcall2_start, @@ -885,7 +885,7 @@ static char *initcall_level_names[] __initdata = { static void __init do_initcall_level(int level) { - initcall_t *fn; + initcall_entry_t *fn; strcpy(initcall_command_line, saved_command_line); parse_args(initcall_level_names[level], @@ -895,7 +895,7 @@ static void __init do_initcall_level(int level) NULL, _env_string); for (fn =
[PATCH v8 3/6] module: use relative references for __ksymtab entries
An ordinary arm64 defconfig build has ~64 KB worth of __ksymtab entries, each consisting of two 64-bit fields containing absolute references, to the symbol itself and to a char array containing its name, respectively. When we build the same configuration with KASLR enabled, we end up with an additional ~192 KB of relocations in the .init section, i.e., one 24 byte entry for each absolute reference, which all need to be processed at boot time. Given how the struct kernel_symbol that describes each entry is completely local to module.c (except for the references emitted by EXPORT_SYMBOL() itself), we can easily modify it to contain two 32-bit relative references instead. This reduces the size of the __ksymtab section by 50% for all 64-bit architectures, and gets rid of the runtime relocations entirely for architectures implementing KASLR, either via standard PIE linking (arm64) or using custom host tools (x86). Note that the binary search involving __ksymtab contents relies on each section being sorted by symbol name. This is implemented based on the input section names, not the names in the ksymtab entries, so this patch does not interfere with that. Given that the use of place-relative relocations requires support both in the toolchain and in the module loader, we cannot enable this feature for all architectures. So make it dependent on whether CONFIG_HAVE_ARCH_PREL32_RELOCATIONS is defined. 
Cc: Arnd BergmannCc: Andrew Morton Cc: Ingo Molnar Cc: Kees Cook Cc: Thomas Garnier Cc: Nicolas Pitre Acked-by: Jessica Yu Signed-off-by: Ard Biesheuvel --- arch/x86/include/asm/Kbuild | 1 + arch/x86/include/asm/export.h | 5 --- include/asm-generic/export.h | 12 - include/linux/compiler.h | 19 include/linux/export.h| 46 +++- kernel/module.c | 32 +++--- 6 files changed, 91 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index de690c2d2e33..a0ab9ab61c75 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h generic-y += dma-contiguous.h generic-y += early_ioremap.h +generic-y += export.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h deleted file mode 100644 index 2a51d66689c5.. --- a/arch/x86/include/asm/export.h +++ /dev/null @@ -1,5 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifdef CONFIG_64BIT -#define KSYM_ALIGN 16 -#endif -#include diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 719db1968d81..97ce606459ae 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -5,12 +5,10 @@ #define KSYM_FUNC(x) x #endif #ifdef CONFIG_64BIT -#define __put .quad #ifndef KSYM_ALIGN #define KSYM_ALIGN 8 #endif #else -#define __put .long #ifndef KSYM_ALIGN #define KSYM_ALIGN 4 #endif @@ -25,6 +23,16 @@ #define KSYM(name) name #endif +.macro __put, val, name +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + .long \val - ., \name - . +#elif defined(CONFIG_64BIT) + .quad \val, \name +#else + .long \val, \name +#endif +.endm + /* * note on .section use: @progbits vs %progbits nastiness doesn't matter, * since we immediately emit into those sections anyway. 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ab4711c63601..0a9328ea9dbd 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -280,6 +280,25 @@ unsigned long read_word_at_a_time(const void *addr) #endif /* __KERNEL__ */ +/* + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. Necessary in case 'sym' could be inlined + * otherwise, or eliminated entirely due to lack of references that are + * visible to the compiler. + */ +#define __ADDRESSABLE(sym) \ + static void * const __attribute__((section(".discard"), used)) \ + __PASTE(__addressable_##sym, __LINE__) = (void *) + +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value + */ +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} + #endif /* __ASSEMBLY__ */ #ifndef __optimize diff --git a/include/linux/export.h b/include/linux/export.h index 25005b55b079..04c78e6bfec9 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -24,12 +24,6 @@ #define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x) #ifndef __ASSEMBLY__ -struct kernel_symbol -{ - unsigned long value; - const char *name; -}; - #ifdef MODULE extern struct module __this_module; #define THIS_MODULE (&__this_module) @@
[PATCH v8 0/6] add support for relative references in special sections
This adds support for emitting special sections such as initcall arrays, PCI fixups and tracepoints as relative references rather than absolute references. This reduces the size by 50% on 64-bit architectures, but more importantly, it removes the need for carrying relocation metadata for these sections in relocatable kernels (e.g., for KASLR) that needs to be fixed up at boot time. On arm64, this reduces the vmlinux footprint of such a reference by 8x (8 byte absolute reference + 24 byte RELA entry vs 4 byte relative reference) Patch #3 was sent out before as a single patch. This series supersedes the previous submission. This version makes relative ksymtab entries dependent on the new Kconfig symbol HAVE_ARCH_PREL32_RELOCATIONS rather than trying to infer from kbuild test robot replies for which architectures it should be blacklisted. Patch #1 introduces the new Kconfig symbol HAVE_ARCH_PREL32_RELOCATIONS, and sets it for the main architectures that are expected to benefit the most from this feature, i.e., 64-bit architectures or ones that use runtime relocations. Patch #2 adds support for #define'ing __DISABLE_EXPORTS to get rid of ksymtab/kcrctab sections in decompressor and EFI stub objects when rebuilding existing C files to run in a different context. Patches #4 - #6 implement relative references for initcalls, PCI fixups and tracepoints, respectively, all of which produce sections with order ~1000 entries on an arm64 defconfig kernel with tracing enabled. This means we save about 28 KB of vmlinux space for each of these patches. [From the v7 series blurb, which included the jump_label patches as well]: For the arm64 kernel, all patches combined reduce the memory footprint of vmlinux by about 1.3 MB (using a config copied from Ubuntu that has KASLR enabled), of which ~1 MB is the size reduction of the RELA section in .init, and the remaining 300 KB is reduction of .text/.data. 
Branch: git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git relative-special-sections-v8 Changes since v7: - dropped the jump_label patches, these will be revisited in a separate series - reorder __DISABLE_EXPORTS with __KSYM_DEPS__ check in #2 - use offset_to_ptr() helper function to abstract the relative pointer conversion [int *off -> (ulong)off + *off] (#3 - #6) - rebase onto v4.16-rc3 Changes since v6: - drop S390 from patch #1 introducing HAVE_ARCH_PREL32_RELOCATIONS: kbuild robot threw me some s390 curveballs, and given that s390 does not define CONFIG_RELOCATABLE in the first place, it does not benefit as much from relative references as arm64, x86 and power do - add patch to allow symbol exports to be disabled at compilation unit granularity (#2) - get rid of arm64 vmlinux.lds.S hunk to ensure code generated by __ADDRESSABLE gets discarded from the EFI stub - it is no longer needed after adding #2 (#1) - change __ADDRESSABLE() to emit a data reference, not a code reference - this is another simplification made possible by patch #2 (#3) - add Steven's ack to #6 - split x86 jump_label patch into two (#9, #10) Changes since v5: - add missing jump_label prototypes to s390 jump_label.h (#6) - fix inverted condition in call to jump_entry_is_module_init() (#6) Changes since v4: - add patches to convert x86 and arm64 to use relative references for jump tables (#6 - #8) - rename PCI patch and add Bjorn's ack (#4) - rebase onto v4.15-rc5 Changes since v3: - fix module unload issue in patch #5 reported by Jessica, by reusing the updated routine for_each_tracepoint_range() for the quiescent check at module unload time; this requires this routine to be moved before tracepoint_module_going() in kernel/tracepoint.c - add Jessica's ack to #2 - rebase onto v4.14-rc1 Changes since v2: - Revert my slightly misguided attempt to appease checkpatch, which resulted in needless churn and worse code. 
This v3 is based on v1 with a few tweaks that were actually reasonable checkpatch warnings: unnecessary braces (as pointed out by Ingo) and other minor whitespace misdemeanors. Changes since v1: - Remove checkpatch errors to the extent feasible: in some cases, this involves moving extern declarations into C files, and switching to struct definitions rather than typedefs. Some errors are impossible to fix: please find the remaining ones after the diffstat. - Used 'int' instead of 'signed int' for the various offset fields: there is no ambiguity between architectures regarding its signedness (unlike 'char') - Refactor the different patches to be more uniform in the way they define the section entry type and accessors in the .h file, and avoid the need to add #ifdefs to the C code. Cc: Arnd Bergmann Cc: Kees Cook Cc: Will Deacon Cc: Michael Ellerman Cc: Thomas Garnier Cc: Thomas Gleixner Cc: "Serge E. Hallyn" Cc: Bjorn Helgaas
[PATCH v8 1/6] arch: enable relative relocations for arm64, power and x86
Before updating certain subsystems to use place relative 32-bit relocations in special sections, to save space and reduce the number of absolute relocations that need to be processed at runtime by relocatable kernels, introduce the Kconfig symbol and define it for some architectures that should be able to support and benefit from it. Cc: Catalin MarinasCc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Thomas Gleixner Cc: Ingo Molnar Cc: x...@kernel.org Signed-off-by: Ard Biesheuvel --- arch/Kconfig | 10 ++ arch/arm64/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/x86/Kconfig | 1 + 4 files changed, 13 insertions(+) diff --git a/arch/Kconfig b/arch/Kconfig index 76c0b54443b1..4e624f75823a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -975,4 +975,14 @@ config REFCOUNT_FULL against various use-after-free conditions that can be used in security flaw exploits. +config HAVE_ARCH_PREL32_RELOCATIONS + bool + help + May be selected by an architecture if it supports place-relative + 32-bit relocations, both in the toolchain and in the module loader, + in which case relative references can be used in special sections + for PCI fixup, initcalls etc which are only half the size on 64 bit + architectures, and don't require runtime relocation on relocatable + kernels. 
+ source "kernel/gcov/Kconfig" diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7381eeb7ef8e..7c543667703e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -91,6 +91,7 @@ config ARM64 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 73ce5dd07642..21d5ad1608d8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -181,6 +181,7 @@ config PPC select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JITif !PPC64 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1236b187824..1c6e2ddd2fdc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -117,6 +117,7 @@ config X86 select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT + select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK -- 2.15.1
[PATCH v8 2/6] module: allow symbol exports to be disabled
To allow existing C code to be incorporated into the decompressor or the UEFI stub, introduce a CPP macro that turns all EXPORT_SYMBOL_xxx declarations into nops, and #define it in places where such exports are undesirable. Note that this gets rid of a rather dodgy redefine of linux/export.h's header guard. Signed-off-by: Ard Biesheuvel--- arch/x86/boot/compressed/kaslr.c | 5 + drivers/firmware/efi/libstub/Makefile | 3 ++- include/linux/export.h| 11 ++- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 8199a6187251..3a2a6d7049e4 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -23,11 +23,8 @@ * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL * which is meaningless and will cause compiling error in some cases. - * So do not include linux/export.h and define EXPORT_SYMBOL(sym) - * as empty. 
*/ -#define _LINUX_EXPORT_H -#define EXPORT_SYMBOL(sym) +#define __DISABLE_EXPORTS #include "misc.h" #include "error.h" diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 7b3ba40f0745..896a882c89f4 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -20,7 +20,8 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ - $(call cc-option,-fno-stack-protector) + $(call cc-option,-fno-stack-protector) \ + -D__DISABLE_EXPORTS GCOV_PROFILE := n KASAN_SANITIZE := n diff --git a/include/linux/export.h b/include/linux/export.h index 1a1dfdb2a5c6..25005b55b079 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -72,7 +72,16 @@ extern struct module __this_module; __attribute__((section("___ksymtab" sec "+" #sym), used)) \ = { (unsigned long), __kstrtab_##sym } -#if defined(__KSYM_DEPS__) +#if defined(__DISABLE_EXPORTS) + +/* + * Allow symbol exports to be disabled completely so that C code may + * be reused in other execution contexts such as the UEFI stub or the + * decompressor. + */ +#define __EXPORT_SYMBOL(sym, sec) + +#elif defined(__KSYM_DEPS__) /* * For fine grained build dependencies, we want to tell the build system -- 2.15.1
[PATCH] powerpc: Use common error handling code in setup_new_fdt()
From: Markus ElfringDate: Sun, 11 Mar 2018 09:03:42 +0100 Add a jump target so that a bit of exception handling can be better reused at the end of this function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring --- arch/powerpc/kernel/machine_kexec_file_64.c | 28 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c index e4395f937d63..90c6004c2eec 100644 --- a/arch/powerpc/kernel/machine_kexec_file_64.c +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -302,18 +302,14 @@ int setup_new_fdt(const struct kimage *image, void *fdt, ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-start", initrd_load_addr); - if (ret < 0) { - pr_err("Error setting up the new device tree.\n"); - return -EINVAL; - } + if (ret < 0) + goto report_setup_failure; /* initrd-end is the first address after the initrd image. */ ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end", initrd_load_addr + initrd_len); - if (ret < 0) { - pr_err("Error setting up the new device tree.\n"); - return -EINVAL; - } + if (ret < 0) + goto report_setup_failure; ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len); if (ret) { @@ -325,10 +321,8 @@ int setup_new_fdt(const struct kimage *image, void *fdt, if (cmdline != NULL) { ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); - if (ret < 0) { - pr_err("Error setting up the new device tree.\n"); - return -EINVAL; - } + if (ret < 0) + goto report_setup_failure; } else { ret = fdt_delprop(fdt, chosen_node, "bootargs"); if (ret && ret != -FDT_ERR_NOTFOUND) { @@ -344,10 +338,12 @@ int setup_new_fdt(const struct kimage *image, void *fdt, } ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0); - if (ret) { - pr_err("Error setting up the new device tree.\n"); - return -EINVAL; - } + if (ret) + goto report_setup_failure; return 0; + +report_setup_failure: + pr_err("Error setting up the 
new device tree.\n"); + return -EINVAL; } -- 2.16.2