Re: [PATCH 11/27] m68k: add missing FORCE and fix 'targets' to make if_changed work
On Thu, Jan 28, 2021 at 1:54 AM Masahiro Yamada wrote: > The rules in this Makefile cannot detect the command line change because > the prerequisite 'FORCE' is missing. > > Adding 'FORCE' will result in the headers being rebuilt every time > because the 'targets' addition is also wrong; the file paths in > 'targets' must be relative to the current Makefile. > > Fix all of them so the if_changed rules work correctly. > > Signed-off-by: Masahiro Yamada Acked-by: Geert Uytterhoeven Gr{oetje,eeting}s, Geert -- Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- ge...@linux-m68k.org In personal conversations with technical people, I call myself a hacker. But when I'm talking to journalists I just say "programmer" or something like that. -- Linus Torvalds
Re: [PATCH 2/3] kbuild: LD_VERSION redenomination
On Sun, Dec 13, 2020 at 1:54 AM Masahiro Yamada wrote: > > Commit ccbef1674a15 ("Kbuild, lto: add ld-version and ld-ifversion > macros") introduced scripts/ld-version.sh for GCC LTO. > > At that time, this script handled 5 version fields because GCC LTO > needed the downstream binutils. (https://lkml.org/lkml/2014/4/8/272) > > The code snippet from the submitted patch was as follows: > > # We need HJ Lu's Linux binutils because mainline binutils does not > # support mixing assembler and LTO code in the same ld -r object. > # XXX check if the gcc plugin ld is the expected one too > # XXX some Fedora binutils should also support it. How to check for that? > ifeq ($(call ld-ifversion,-ge,22710001,y),y) > ... > > However, GCC LTO was not merged into the mainline after all. > (https://lkml.org/lkml/2014/4/8/272) > > So, the 4th and 5th fields were never used, and finally removed by > commit 0d61ed17dd30 ("ld-version: Drop the 4th and 5th version > components"). > > Since then, the last 4-digits returned by this script is always zeros. > > Remove the meaningless last 4-digits. This makes the version format > consistent with GCC_VERSION, CLANG_VERSION, LLD_VERSION. > > Signed-off-by: Masahiro Yamada > --- > Applied to linux-kbuild. > arch/arm64/Kconfig| 2 +- > arch/mips/loongson64/Platform | 2 +- > arch/mips/vdso/Kconfig| 2 +- > arch/powerpc/Makefile | 2 +- > arch/powerpc/lib/Makefile | 2 +- > scripts/ld-version.sh | 2 +- > 6 files changed, 6 insertions(+), 6 deletions(-) > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > index a6b5b7ef40ae..69d56b21a6ec 100644 > --- a/arch/arm64/Kconfig > +++ b/arch/arm64/Kconfig > @@ -1499,7 +1499,7 @@ config ARM64_PTR_AUTH > depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) > && AS_HAS_PAC > # Modern compilers insert a .note.gnu.property section note for PAC > # which is only understood by binutils starting with version 2.33.1. 
> - depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && > GCC_VERSION < 90100) > + depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && > GCC_VERSION < 90100) > depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE > depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) > help > diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform > index ec42c5085905..cc0b9c87f9ad 100644 > --- a/arch/mips/loongson64/Platform > +++ b/arch/mips/loongson64/Platform > @@ -35,7 +35,7 @@ cflags-$(CONFIG_CPU_LOONGSON64) += $(call > as-option,-Wa$(comma)-mno-fix-loongson > # can't easily be used safely within the kbuild framework. > # > ifeq ($(call cc-ifversion, -ge, 0409, y), y) > - ifeq ($(call ld-ifversion, -ge, 22500, y), y) > + ifeq ($(call ld-ifversion, -ge, 22500, y), y) > cflags-$(CONFIG_CPU_LOONGSON64) += \ >$(call cc-option,-march=loongson3a -U_MIPS_ISA > -D_MIPS_ISA=_MIPS_ISA_MIPS64) >else > diff --git a/arch/mips/vdso/Kconfig b/arch/mips/vdso/Kconfig > index 7aec721398d5..a665f6108cb5 100644 > --- a/arch/mips/vdso/Kconfig > +++ b/arch/mips/vdso/Kconfig > @@ -12,7 +12,7 @@ > # the lack of relocations. As such, we disable the VDSO for microMIPS builds. > > config MIPS_LD_CAN_LINK_VDSO > - def_bool LD_VERSION >= 22500 || LD_IS_LLD > + def_bool LD_VERSION >= 22500 || LD_IS_LLD > > config MIPS_DISABLE_VDSO > def_bool CPU_MICROMIPS || (!CPU_MIPSR6 && !MIPS_LD_CAN_LINK_VDSO) > diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile > index 5c8c06215dd4..6a9a852c3d56 100644 > --- a/arch/powerpc/Makefile > +++ b/arch/powerpc/Makefile > @@ -65,7 +65,7 @@ UTS_MACHINE := $(subst $(space),,$(machine-y)) > ifdef CONFIG_PPC32 > KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o > else > -ifeq ($(call ld-ifversion, -ge, 22500, y),y) > +ifeq ($(call ld-ifversion, -ge, 22500, y),y) > # Have the linker provide sfpr if possible. 
> # There is a corresponding test in arch/powerpc/lib/Makefile > KBUILD_LDFLAGS_MODULE += --save-restore-funcs > diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile > index 69a91b571845..d4efc182662a 100644 > --- a/arch/powerpc/lib/Makefile > +++ b/arch/powerpc/lib/Makefile > @@ -31,7 +31,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION)+= > error-inject.o > # 64-bit linker creates .sfpr on demand for final link (vmlinux), > # so it is only needed for modules, and only for older linkers which > # do not support --save-restore-funcs > -ifeq ($(call ld-ifversion, -lt, 22500, y),y) > +ifeq ($(call ld-ifversion, -lt, 22500, y),y) > extra-$(CONFIG_PPC64) += crtsavres.o > endif > > diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh > index f2be0ff9a738..0f8a2c0f9502 100755 > --- a/scripts/ld-version.sh > +++ b/scripts/ld-version.sh > @@ -6,6 +6,6 @@ > gsub(".*version ", ""); > gsub("-.*", ""); > split($1,a, ".
Re: [PATCH] powerpc64: Workaround sigtramp vdso return call.
+linuxppc-dev Excerpts from Raoni Fassina Firmino's message of January 28, 2021 2:21 am: > On Tue, Jan 26, 2021 at 08:45:00AM -0600, AL glibc-alpha wrote: >> >> >> On 1/26/21 8:12 AM, Florian Weimer via Libc-alpha wrote: >> > * Raoni Fassina Firmino: >> > >> > > A not so recent kernel change[1] changed how the trampoline >> > > `__kernel_sigtramp_rt64` is used to call signal handlers. >> > > >> > > This was exposed on the test misc/tst-sigcontext-get_pc >> > > >> > > Before kernel 5.9, the kernel set LR to the trampoline address and >> > > jumped directly to the signal handler, and at the end the signal >> > > handler, as any other function, would `blr` to the address set. In >> > > other words, the trampoline was executed just at the end of the signal >> > > handler and the only thing it did was call sigreturn. But since >> > > kernel 5.9 the kernel set CTR to the signal handler and calls to the >> > > trampoline code, the trampoline then `bctrl` to the address in CTR, >> > > setting the LR to the next instruction in the middle of the >> > > trampoline, when the signal handler returns, the rest of the >> > > trampoline code executes the same code as before. >> > >> > Thanks for the patch, but: >> > >> > No one has explained so far why the original blr instruction couldn't be >> > augmented with the appropriate branch predictor hint. The 2.07 ISA >> > manual suggests that it's possible, but maybe I'm reading it wrong. >> >> bctrl is the preferred form of making indirect calls. You can add hint 0b01 >> to bclr (blr) to get similar behavior on power8/9, but as noted in the ISA, >> it is optional. > > What branch prediction we are talking about? I think there is only one > blr that is relevant, the one returning from the signal handler to the > trampoline. In this case if it is a simple blr it is already hinted > correctly with 0b00 (I think it is the default BH for blr), that it is a > subroutine return. 
We don't have control over the blr from the signal > handler to change the hint to 0b01 anyway. So IIUC, the return address > predictor failed before because the signal handler don't go back from > the same place (+4) it was called, and it changes with the added bctrl. > > I am CC'ing Nicholas and maybe he has more insight. Prior to the kernel patch, if the signal handler code used blr BH=0b01 for returns that would indeed prevent the unbalance on processors which implement it. But you are right, as explained in Linux commit 0138ba5783ae, the blr is in the signal handler function so we can't change that. > (I know that now this discussion is split in two places, the original > thread Florian started and this on for the patch. Not sure where best to > continue this) linuxppc-dev doesn't mind responsible cross posts to other lists, hopefully libc-alpha is too. Thanks, Nick
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
On Thu, 2021-01-28 at 00:52 -0300, Thiago Jung Bauermann wrote: > The problem is that this patch implements only part of the suggestion, > which isn't useful in itself. So the patch series should either drop > this patch or consolidate the FDT allocation between the arches. > > I just tested on powernv and pseries platforms and powerpc can use > vmalloc for the FDT buffer. Perhaps more sensible to use kvmalloc/kvfree.
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
On 1/27/21 8:14 PM, Thiago Jung Bauermann wrote: Lakshmi Ramasubramanian writes: On 1/27/21 7:52 PM, Thiago Jung Bauermann wrote: Will Deacon writes: On Wed, Jan 27, 2021 at 09:59:38AM -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 8:52 AM, Will Deacon wrote: Hi Will, On Fri, Jan 15, 2021 at 09:30:16AM -0800, Lakshmi Ramasubramanian wrote: create_dtb() function allocates kernel virtual memory for the device tree blob (DTB). This is not consistent with other architectures, such as powerpc, which calls kmalloc() for allocating memory for the DTB. Call kmalloc() to allocate memory for the DTB, and kfree() to free the allocated memory. Co-developed-by: Prakhar Srivastava Signed-off-by: Prakhar Srivastava Signed-off-by: Lakshmi Ramasubramanian --- arch/arm64/kernel/machine_kexec_file.c | 12 +++- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 7de9c47dee7c..51c40143d6fa 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -29,7 +29,7 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { int arch_kimage_file_post_load_cleanup(struct kimage *image) { - vfree(image->arch.dtb); + kfree(image->arch.dtb); image->arch.dtb = NULL; vfree(image->arch.elf_headers); @@ -59,19 +59,21 @@ static int create_dtb(struct kimage *image, + cmdline_len + DTB_EXTRA_SPACE; for (;;) { - buf = vmalloc(buf_size); + buf = kmalloc(buf_size, GFP_KERNEL); Is there a functional need for this patch? I build the 'dtbs' target just now and sdm845-db845c.dtb is approaching 100K, which feels quite large for kmalloc(). Changing the allocation from vmalloc() to kmalloc() would help us further consolidate the DTB setup code for powerpc and arm64. Ok, but at the risk of allocation failure. Can powerpc use vmalloc() instead? 
I believe this patch stems from this suggestion by Rob Herring: This could be taken a step further and do the allocation of the new FDT. The difference is arm64 uses vmalloc and powerpc uses kmalloc. The arm64 version also retries with a bigger allocation. That seems unnecessary. in https://lore.kernel.org/linux-integrity/20201211221006.1052453-3-r...@kernel.org/ The problem is that this patch implements only part of the suggestion, which isn't useful in itself. So the patch series should either drop this patch or consolidate the FDT allocation between the arches. I just tested on powernv and pseries platforms and powerpc can use vmalloc for the FDT buffer. Thanks for verifying on powerpc platform Thiago. I'll update the patch to do the following: => Use vmalloc for FDT buffer allocation on powerpc => Keep vmalloc for arm64, but remove the retry on allocation. => Also, there was a memory leak of FDT buffer in the error code path on arm64, which I'll fix as well. Did I miss anything? Yes, you missed the second part of Rob's suggestion I was mentioning, which is factoring out the code which allocates the new FDT from both arm64 and powerpc. Sure - I'll address that. thanks, -lakshmi
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
Lakshmi Ramasubramanian writes: > On 1/27/21 7:52 PM, Thiago Jung Bauermann wrote: >> Will Deacon writes: >> >>> On Wed, Jan 27, 2021 at 09:59:38AM -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 8:52 AM, Will Deacon wrote: Hi Will, > On Fri, Jan 15, 2021 at 09:30:16AM -0800, Lakshmi Ramasubramanian wrote: >> create_dtb() function allocates kernel virtual memory for >> the device tree blob (DTB). This is not consistent with other >> architectures, such as powerpc, which calls kmalloc() for allocating >> memory for the DTB. >> >> Call kmalloc() to allocate memory for the DTB, and kfree() to free >> the allocated memory. >> >> Co-developed-by: Prakhar Srivastava >> Signed-off-by: Prakhar Srivastava >> Signed-off-by: Lakshmi Ramasubramanian >> --- >>arch/arm64/kernel/machine_kexec_file.c | 12 +++- >>1 file changed, 7 insertions(+), 5 deletions(-) >> >> diff --git a/arch/arm64/kernel/machine_kexec_file.c >> b/arch/arm64/kernel/machine_kexec_file.c >> index 7de9c47dee7c..51c40143d6fa 100644 >> --- a/arch/arm64/kernel/machine_kexec_file.c >> +++ b/arch/arm64/kernel/machine_kexec_file.c >> @@ -29,7 +29,7 @@ const struct kexec_file_ops * const >> kexec_file_loaders[] = { >>int arch_kimage_file_post_load_cleanup(struct kimage *image) >>{ >> -vfree(image->arch.dtb); >> +kfree(image->arch.dtb); >> image->arch.dtb = NULL; >> vfree(image->arch.elf_headers); >> @@ -59,19 +59,21 @@ static int create_dtb(struct kimage *image, >> + cmdline_len + DTB_EXTRA_SPACE; >> for (;;) { >> -buf = vmalloc(buf_size); >> +buf = kmalloc(buf_size, GFP_KERNEL); > > Is there a functional need for this patch? I build the 'dtbs' target just > now and sdm845-db845c.dtb is approaching 100K, which feels quite large > for kmalloc(). Changing the allocation from vmalloc() to kmalloc() would help us further consolidate the DTB setup code for powerpc and arm64. >>> >>> Ok, but at the risk of allocation failure. Can powerpc use vmalloc() >>> instead? 
>> I believe this patch stems from this suggestion by Rob Herring: >> >>> This could be taken a step further and do the allocation of the new >>> FDT. The difference is arm64 uses vmalloc and powerpc uses kmalloc. The >>> arm64 version also retries with a bigger allocation. That seems >>> unnecessary. >> in >> https://lore.kernel.org/linux-integrity/20201211221006.1052453-3-r...@kernel.org/ >> The problem is that this patch implements only part of the suggestion, >> which isn't useful in itself. So the patch series should either drop >> this patch or consolidate the FDT allocation between the arches. >> I just tested on powernv and pseries platforms and powerpc can use >> vmalloc for the FDT buffer. >> > > Thanks for verifying on powerpc platform Thiago. > > I'll update the patch to do the following: > > => Use vmalloc for FDT buffer allocation on powerpc > => Keep vmalloc for arm64, but remove the retry on allocation. > => Also, there was a memory leak of FDT buffer in the error code path on > arm64, > which I'll fix as well. > > Did I miss anything? Yes, you missed the second part of Rob's suggestion I was mentioning, which is factoring out the code which allocates the new FDT from both arm64 and powerpc. -- Thiago Jung Bauermann IBM Linux Technology Center
Re: [PATCH 1/2] PCI/AER: Disable AER interrupt during suspend
On Thu, Jan 28, 2021 at 4:51 AM Bjorn Helgaas wrote: > > On Thu, Jan 28, 2021 at 01:31:00AM +0800, Kai-Heng Feng wrote: > > Commit 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in > > hint") enables ACS, and some platforms lose its NVMe after resume from > > firmware: > > [ 50.947816] pcieport :00:1b.0: DPC: containment event, status:0x1f01 > > source:0x > > [ 50.947817] pcieport :00:1b.0: DPC: unmasked uncorrectable error > > detected > > [ 50.947829] pcieport :00:1b.0: PCIe Bus Error: severity=Uncorrected > > (Non-Fatal), type=Transaction Layer, (Receiver ID) > > [ 50.947830] pcieport :00:1b.0: device [8086:06ac] error > > status/mask=0020/0001 > > [ 50.947831] pcieport :00:1b.0:[21] ACSViol(First) > > [ 50.947841] pcieport :00:1b.0: AER: broadcast error_detected message > > [ 50.947843] nvme nvme0: frozen state error detected, reset controller > > > > It happens right after ACS gets enabled during resume. > > > > To prevent that from happening, disable AER interrupt and enable it on > > system suspend and resume, respectively. > > Lots of questions here. Maybe this is what we'll end up doing, but I > am curious about why the error is reported in the first place. > > Is this a consequence of the link going down and back up? Could be. From the observations, it only happens when firmware suspend (S3) is used. Maybe it happens when it's gets powered up, but I don't have equipment to debug at hardware level. If we use non-firmware suspend method, enabling ACS after resume won't trip AER and DPC. > > Is it consequence of the device doing a DMA when it shouldn't? If it's doing DMA while suspending, the same error should also happen after NVMe is suspended and before PCIe port suspending. Furthermore, if non-firmware suspend method is used, there's so such issue, so less likely to be any DMA operation. > > Are we doing something in the wrong order during suspend? Or maybe > resume, since I assume the error is reported during resume? 
Yes the error is reported during resume. The suspend/resume order seems fine as non-firmware suspend doesn't have this issue. > > If we *do* take the error, why doesn't DPC recovery work? It works for the root port, but not for the NVMe drive: [ 50.947816] pcieport :00:1b.0: DPC: containment event, status:0x1f01 source:0x [ 50.947817] pcieport :00:1b.0: DPC: unmasked uncorrectable error detected [ 50.947829] pcieport :00:1b.0: PCIe Bus Error: severity=Uncorrected (Non-Fatal), type=Transaction Layer, (Receiver ID) [ 50.947830] pcieport :00:1b.0: device [8086:06ac] error status/mask=0020/0001 [ 50.947831] pcieport :00:1b.0:[21] ACSViol(First) [ 50.947841] pcieport :00:1b.0: AER: broadcast error_detected message [ 50.947843] nvme nvme0: frozen state error detected, reset controller [ 50.948400] ACPI: EC: event unblocked [ 50.948432] xhci_hcd :00:14.0: PME# disabled [ 50.948444] xhci_hcd :00:14.0: enabling bus mastering [ 50.949056] pcieport :00:1b.0: PME# disabled [ 50.949068] pcieport :00:1c.0: PME# disabled [ 50.949416] e1000e :00:1f.6: PME# disabled [ 50.949463] e1000e :00:1f.6: enabling bus mastering [ 50.951606] sd 0:0:0:0: [sda] Starting disk [ 50.951610] nvme :01:00.0: can't change power state from D3hot to D0 (config space inaccessible) [ 50.951730] nvme nvme0: Removing after probe failure status: -19 [ 50.952360] nvme nvme0: failed to set APST feature (-19) [ 50.971136] snd_hda_intel :00:1f.3: PME# disabled [ 51.089330] pcieport :00:1b.0: AER: broadcast resume message [ 51.089345] pcieport :00:1b.0: AER: device recovery successful But I think why recovery doesn't work for NVMe is for another discussion... 
Kai-Heng > > > Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=209149 > > Fixes: 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") > > Signed-off-by: Kai-Heng Feng > > --- > > drivers/pci/pcie/aer.c | 18 ++ > > 1 file changed, 18 insertions(+) > > > > diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c > > index 77b0f2c45bc0..0e9a85530ae6 100644 > > --- a/drivers/pci/pcie/aer.c > > +++ b/drivers/pci/pcie/aer.c > > @@ -1365,6 +1365,22 @@ static int aer_probe(struct pcie_device *dev) > > return 0; > > } > > > > +static int aer_suspend(struct pcie_device *dev) > > +{ > > + struct aer_rpc *rpc = get_service_data(dev); > > + > > + aer_disable_rootport(rpc); > > + return 0; > > +} > > + > > +static int aer_resume(struct pcie_device *dev) > > +{ > > + struct aer_rpc *rpc = get_service_data(dev); > > + > > + aer_enable_rootport(rpc); > > + return 0; > > +} > > + > > /** > > * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP > > * @dev: pointer to Root Port, RCEC, or RCiEP > > @@ -1437,6 +1453,8 @@ static struct pcie_port_service_driver aerdriver =
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On 1/27/21 3:13 PM, Will Deacon wrote: On Wed, Jan 27, 2021 at 01:31:02PM -0500, Mimi Zohar wrote: On Wed, 2021-01-27 at 10:24 -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 10:02 AM, Will Deacon wrote: On Wed, Jan 27, 2021 at 09:56:53AM -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 8:54 AM, Will Deacon wrote: On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: Address and size of the buffer containing the IMA measurement log need to be passed from the current kernel to the next kernel on kexec. Add address and size fields to "struct kimage_arch" for ARM64 platform to hold the address and size of the IMA measurement log buffer. Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA is enabled, to indicate that the IMA measurement log information is present in the device tree for ARM64. Co-developed-by: Prakhar Srivastava Signed-off-by: Prakhar Srivastava Signed-off-by: Lakshmi Ramasubramanian Reviewed-by: Thiago Jung Bauermann --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/kexec.h | 5 + 2 files changed, 6 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1d466addb078..ea7f7fe3dccd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1094,6 +1094,7 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE + select HAVE_IMA_KEXEC if IMA help This is new version of kexec system call. This system call is file based and takes file descriptors as system call argument diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index d24b527e8c00..2bd19ccb6c43 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -100,6 +100,11 @@ struct kimage_arch { void *elf_headers; unsigned long elf_headers_mem; unsigned long elf_headers_sz; + +#ifdef CONFIG_IMA_KEXEC + phys_addr_t ima_buffer_addr; + size_t ima_buffer_size; +#endif Why do these need to be in the arch structure instead of 'struct kimage'? 
Currently, only powerpc and, with this patch set, arm64 have support for carrying forward IMA measurement list across kexec system call. The above fields are used for tracking IMA measurement list. Do you see a reason to move these fields to "struct kimage"? If they're gated on CONFIG_IMA_KEXEC, then it seems harmless for them to be added to the shared structure. Or are you saying that there are architectures which have CONFIG_IMA_KEXEC but do not want these fields? As far as I know, there are no other architectures that define CONFIG_IMA_KEXEC, but do not use these fields. Yes, CONFIG_IMA_KEXEC enables "carrying the IMA measurement list across a soft boot". The only arch that currently carries the IMA measurement across kexec is powerpc. Ok, in which case this sounds like it should be in the shared structure, no? Ok - I'll move the IMA kexec buffer fields from "struct kimage_arch" to "struct kimage" for both powerpc and arm64. thanks, -lakshmi
[PATCH v4 07/10] powerpc/signal64: Replace restore_sigcontext() w/ unsafe_restore_sigcontext()
Previously restore_sigcontext() performed a costly KUAP switch on every uaccess operation. These repeated uaccess switches cause a significant drop in signal handling performance. Rewrite restore_sigcontext() to assume that a userspace read access window is open. Replace all uaccess functions with their 'unsafe' versions which avoid the repeated uaccess switches. Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 68 - 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 4248e4489ff1..d668f8af18fe 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -326,14 +326,14 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, /* * Restore the sigcontext from the signal frame. */ - -static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, - struct sigcontext __user *sc) +#define unsafe_restore_sigcontext(tsk, set, sig, sc, e) \ + unsafe_op_wrap(__unsafe_restore_sigcontext(tsk, set, sig, sc), e) +static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set, + int sig, struct sigcontext __user *sc) { #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs; #endif - unsigned long err = 0; unsigned long save_r13 = 0; unsigned long msr; struct pt_regs *regs = tsk->thread.regs; @@ -348,27 +348,28 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, save_r13 = regs->gpr[13]; /* copy the GPRs */ - err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr)); - err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]); + unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), + efault_out); + unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out); /* get MSR separately, transfer the LE bit if doing signal return */ - err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out); if (sig) regs->msr = (regs->msr & 
~MSR_LE) | (msr & MSR_LE); - err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]); - err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]); - err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]); - err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); - err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); + unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out); + unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out); + unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out); + unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out); + unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out); /* Don't allow userspace to set SOFTE */ set_trap_norestart(regs); - err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); - err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); - err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); + unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out); + unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out); + unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out); if (!sig) regs->gpr[13] = save_r13; if (set != NULL) - err |= __get_user(set->sig[0], &sc->oldmask); + unsafe_get_user(set->sig[0], &sc->oldmask, efault_out); /* * Force reload of FP/VEC. 
@@ -378,29 +379,28 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); #ifdef CONFIG_ALTIVEC - err |= __get_user(v_regs, &sc->v_regs); - if (err) - return err; + unsafe_get_user(v_regs, &sc->v_regs, efault_out); if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && (msr & MSR_VEC) != 0) { - err |= __copy_from_user(&tsk->thread.vr_state, v_regs, - 33 * sizeof(vector128)); + unsafe_copy_from_user(&tsk->thread.vr_state, v_regs, + 33 * sizeof(vector128), efault_out); tsk->thread.used_vr = true; } else if (tsk->thread.used_vr) { memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128)); } /* Always get VRSAVE back */ if (v_regs != NULL) - err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]); + unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33
[PATCH v4 03/10] powerpc/signal64: Move non-inline functions out of setup_sigcontext()
There are non-inline functions which get called in setup_sigcontext() to save register state to the thread struct. Move these functions into a separate prepare_setup_sigcontext() function so that setup_sigcontext() can be refactored later into an "unsafe" version which assumes an open uaccess window. Non-inline functions should be avoided when uaccess is open. The majority of setup_sigcontext() can be refactored to execute in an "unsafe" context (uaccess window is opened) except for some non-inline functions. Move these out into a separate prepare_setup_sigcontext() function which must be called first and before opening up a uaccess window. A follow-up commit converts setup_sigcontext() to be "unsafe". Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 32 +--- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index f9e4a1ac440f..b211a8ea4f6e 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -79,6 +79,24 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc) } #endif +static void prepare_setup_sigcontext(struct task_struct *tsk, int ctx_has_vsx_region) +{ +#ifdef CONFIG_ALTIVEC + /* save altivec registers */ + if (tsk->thread.used_vr) + flush_altivec_to_thread(tsk); + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + tsk->thread.vrsave = mfspr(SPRN_VRSAVE); +#endif /* CONFIG_ALTIVEC */ + + flush_fp_to_thread(tsk); + +#ifdef CONFIG_VSX + if (tsk->thread.used_vsr && ctx_has_vsx_region) + flush_vsx_to_thread(tsk); +#endif /* CONFIG_VSX */ +} + /* * Set up the sigcontext for the signal frame. 
*/ @@ -97,7 +115,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, */ #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc); - unsigned long vrsave; #endif struct pt_regs *regs = tsk->thread.regs; unsigned long msr = regs->msr; @@ -112,7 +129,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, /* save altivec registers */ if (tsk->thread.used_vr) { - flush_altivec_to_thread(tsk); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ err |= __copy_to_user(v_regs, &tsk->thread.vr_state, 33 * sizeof(vector128)); @@ -124,17 +140,10 @@ static long setup_sigcontext(struct sigcontext __user *sc, /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. */ - vrsave = 0; - if (cpu_has_feature(CPU_FTR_ALTIVEC)) { - vrsave = mfspr(SPRN_VRSAVE); - tsk->thread.vrsave = vrsave; - } - - err |= __put_user(vrsave, (u32 __user *)&v_regs[33]); + err |= __put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]); #else /* CONFIG_ALTIVEC */ err |= __put_user(0, &sc->v_regs); #endif /* CONFIG_ALTIVEC */ - flush_fp_to_thread(tsk); /* copy fpr regs and fpscr */ err |= copy_fpr_to_user(&sc->fp_regs, tsk); @@ -150,7 +159,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, * VMX data. 
*/ if (tsk->thread.used_vsr && ctx_has_vsx_region) { - flush_vsx_to_thread(tsk); v_regs += ELF_NVRREG; err |= copy_vsx_to_user(v_regs, tsk); /* set MSR_VSX in the MSR value in the frame to @@ -655,6 +663,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, ctx_has_vsx_region = 1; if (old_ctx != NULL) { + prepare_setup_sigcontext(current, ctx_has_vsx_region); if (!access_ok(old_ctx, ctx_size) || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0, ctx_has_vsx_region) @@ -842,6 +851,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, #endif { err |= __put_user(0, &frame->uc.uc_link); + prepare_setup_sigcontext(tsk, 1); err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig, NULL, (unsigned long)ksig->ka.sa.sa_handler, 1); -- 2.26.1
[PATCH v4 08/10] powerpc/signal64: Rewrite handle_rt_signal64() to minimise uaccess switches
From: Daniel Axtens Add uaccess blocks and use the 'unsafe' versions of functions doing user access where possible to reduce the number of times uaccess has to be opened/closed. There is no 'unsafe' version of copy_siginfo_to_user, so move it slightly to allow for a "longer" uaccess block. Signed-off-by: Daniel Axtens Co-developed-by: Christopher M. Riedl Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 54 + 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index d668f8af18fe..a471e97589a8 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -849,44 +849,51 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long msr = regs->msr; frame = get_sigframe(ksig, tsk, sizeof(*frame), 0); - if (!access_ok(frame, sizeof(*frame))) - goto badframe; - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); - err |= copy_siginfo_to_user(&frame->info, &ksig->info); - if (err) + /* This only applies when calling unsafe_setup_sigcontext() and must be +* called before opening the uaccess window. +*/ + if (!MSR_TM_ACTIVE(msr)) + prepare_setup_sigcontext(tsk, 1); + + if (!user_write_access_begin(frame, sizeof(*frame))) goto badframe; + unsafe_put_user(&frame->info, &frame->pinfo, badframe_block); + unsafe_put_user(&frame->uc, &frame->puc, badframe_block); + /* Create the ucontext. */ - err |= __put_user(0, &frame->uc.uc_flags); - err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); + unsafe_put_user(0, &frame->uc.uc_flags, badframe_block); + unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block); if (MSR_TM_ACTIVE(msr)) { #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. 
*/ - err |= __put_user(&frame->uc_transact, &frame->uc.uc_link); + unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block); + + user_write_access_end(); + err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, &frame->uc_transact.uc_mcontext, tsk, ksig->sig, NULL, (unsigned long)ksig->ka.sa.sa_handler, msr); + + if (!user_write_access_begin(frame, sizeof(struct rt_sigframe))) + goto badframe; + #endif } else { - err |= __put_user(0, &frame->uc.uc_link); - prepare_setup_sigcontext(tsk, 1); - if (!user_write_access_begin(frame, sizeof(struct rt_sigframe))) - return -EFAULT; - err |= __unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, - ksig->sig, NULL, - (unsigned long)ksig->ka.sa.sa_handler, 1); - user_write_access_end(); + unsafe_put_user(0, &frame->uc.uc_link, badframe_block); + unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig, + NULL, (unsigned long)ksig->ka.sa.sa_handler, + 1, badframe_block); } - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - goto badframe; + + unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block); + user_write_access_end(); /* Make sure signal handler doesn't get spurious FP exceptions */ tsk->thread.fp_state.fpscr = 0; @@ -901,6 +908,11 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, regs->nip = (unsigned long) &frame->tramp[0]; } + + /* Save the siginfo outside of the unsafe block. */ + if (copy_siginfo_to_user(&frame->info, &ksig->info)) + goto badframe; + /* Allocate a dummy caller frame for the signal handler. */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); @@ -940,6 +952,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, return 0; +badframe_block: + user_write_access_end(); badframe: signal_fault(current, regs, "handle_rt_signal64", frame); -- 2.26.1
[PATCH v4 06/10] powerpc/signal64: Replace setup_sigcontext() w/ unsafe_setup_sigcontext()
Previously setup_sigcontext() performed a costly KUAP switch on every uaccess operation. These repeated uaccess switches cause a significant drop in signal handling performance. Rewrite setup_sigcontext() to assume that a userspace write access window is open. Replace all uaccess functions with their 'unsafe' versions which avoid the repeated uaccess switches. Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 70 - 1 file changed, 43 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 8e1d804ce552..4248e4489ff1 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -101,9 +101,13 @@ static void prepare_setup_sigcontext(struct task_struct *tsk, int ctx_has_vsx_re * Set up the sigcontext for the signal frame. */ -static long setup_sigcontext(struct sigcontext __user *sc, - struct task_struct *tsk, int signr, sigset_t *set, - unsigned long handler, int ctx_has_vsx_region) +#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, \ + ctx_has_vsx_region, e) \ + unsafe_op_wrap(__unsafe_setup_sigcontext(sc, tsk, signr, set, \ + handler, ctx_has_vsx_region), e) +static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc, + struct task_struct *tsk, int signr, sigset_t *set, + unsigned long handler, int ctx_has_vsx_region) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -118,20 +122,19 @@ static long setup_sigcontext(struct sigcontext __user *sc, #endif struct pt_regs *regs = tsk->thread.regs; unsigned long msr = regs->msr; - long err = 0; /* Force usr to alway see softe as 1 (interrupts enabled) */ unsigned long softe = 0x1; BUG_ON(tsk != current); #ifdef CONFIG_ALTIVEC - err |= __put_user(v_regs, &sc->v_regs); + unsafe_put_user(v_regs, &sc->v_regs, efault_out); /* save altivec registers */ if (tsk->thread.used_vr) { /* Copy 33 vec registers (vr0..31 
and vscr) to the stack */ - err |= __copy_to_user(v_regs, &tsk->thread.vr_state, - 33 * sizeof(vector128)); + unsafe_copy_to_user(v_regs, &tsk->thread.vr_state, + 33 * sizeof(vector128), efault_out); /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) * contains valid data. */ @@ -140,12 +143,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. */ - err |= __put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]); + unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out); #else /* CONFIG_ALTIVEC */ - err |= __put_user(0, &sc->v_regs); + unsafe_put_user(0, &sc->v_regs, efault_out); #endif /* CONFIG_ALTIVEC */ /* copy fpr regs and fpscr */ - err |= copy_fpr_to_user(&sc->fp_regs, tsk); + unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out); /* * Clear the MSR VSX bit to indicate there is no valid state attached @@ -160,24 +163,27 @@ static long setup_sigcontext(struct sigcontext __user *sc, */ if (tsk->thread.used_vsr && ctx_has_vsx_region) { v_regs += ELF_NVRREG; - err |= copy_vsx_to_user(v_regs, tsk); + unsafe_copy_vsx_to_user(v_regs, tsk, efault_out); /* set MSR_VSX in the MSR value in the frame to * indicate that sc->vs_reg) contains valid data. 
*/ msr |= MSR_VSX; } #endif /* CONFIG_VSX */ - err |= __put_user(&sc->gp_regs, &sc->regs); + unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out); WARN_ON(!FULL_REGS(regs)); - err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); - err |= __put_user(msr, &sc->gp_regs[PT_MSR]); - err |= __put_user(softe, &sc->gp_regs[PT_SOFTE]); - err |= __put_user(signr, &sc->signal); - err |= __put_user(handler, &sc->handler); + unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out); + unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out); + unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out); + unsafe_put_user(signr, &sc->signal, efault_out); + unsafe_put_user(handler, &sc->handler, efault_out); if (set != NULL) - err |= __put_user(set->sig[0], &sc->oldmask); + unsafe_put_user(set->sig[0], &sc->oldmask, efault_out); - return
[PATCH v4 05/10] powerpc/signal64: Remove TM ifdefery in middle of if/else block
Rework the messy ifdef breaking up the if-else for TM similar to commit f1cf4f93de2f ("powerpc/signal32: Remove ifdefery in middle of if/else"). Unlike that commit for ppc32, the ifdef can't be removed entirely since uc_transact in sigframe depends on CONFIG_PPC_TRANSACTIONAL_MEM. Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 16 +++- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index b211a8ea4f6e..8e1d804ce552 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -710,9 +710,7 @@ SYSCALL_DEFINE0(rt_sigreturn) struct pt_regs *regs = current_pt_regs(); struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; sigset_t set; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM unsigned long msr; -#endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -765,7 +763,10 @@ SYSCALL_DEFINE0(rt_sigreturn) if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) goto badframe; +#endif + if (MSR_TM_ACTIVE(msr)) { +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* We recheckpoint on return. */ struct ucontext __user *uc_transact; @@ -778,9 +779,8 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_tm_sigcontexts(current, &uc->uc_mcontext, &uc_transact->uc_mcontext)) goto badframe; - } else #endif - { + } else { /* * Fall through, for non-TM restore * @@ -818,10 +818,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long newsp = 0; long err = 0; struct pt_regs *regs = tsk->thread.regs; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* Save the thread's msr before get_tm_stackpointer() changes it */ unsigned long msr = regs->msr; -#endif frame = get_sigframe(ksig, tsk, sizeof(*frame), 0); if (!access_ok(frame, sizeof(*frame))) @@ -836,8 +834,9 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(msr)) { +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. */ @@ -847,9 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, tsk, ksig->sig, NULL, (unsigned long)ksig->ka.sa.sa_handler, msr); - } else #endif - { + } else { err |= __put_user(0, &frame->uc.uc_link); prepare_setup_sigcontext(tsk, 1); err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig, -- 2.26.1
[PATCH v4 00/10] Improve signal performance on PPC64 with KUAP
As reported by Anton, there is a large penalty to signal handling performance on radix systems using KUAP. The signal handling code performs many user access operations, each of which needs to switch the KUAP permissions bit to open and then close user access. This involves a costly 'mtspr' operation [0]. There is existing work done on x86 and by Christophe Leroy for PPC32 to instead open up user access in "blocks" using user_*_access_{begin,end}. We can do the same in PPC64 to bring performance back up on KUAP-enabled radix and now also hash MMU systems [1]. Hash MMU KUAP support along with uaccess flush has landed in linuxppc/next since the last revision. This series also provides a large benefit on hash with KUAP. However, in the hash implementation of KUAP the user AMR is always restored during system_call_exception() which cannot be avoided. Fewer user access switches naturally also result in less uaccess flushing. The first two patches add some needed 'unsafe' versions of copy-from functions. While these do not make use of asm-goto they still allow for avoiding the repeated uaccess switches. The third patch moves functions called by setup_sigcontext() into a new prepare_setup_sigcontext() to simplify converting setup_sigcontext() into an 'unsafe' version which assumes an open uaccess window later. The fourth and fifth patches clean-up some of the Transactional Memory ifdef stuff to simplify using uaccess blocks later. The next two patches rewrite some of the signal64 helper functions to be 'unsafe'. Finally, the last three patches update the main signal handling functions to make use of the new 'unsafe' helpers and eliminate some additional uaccess switching. I used the will-it-scale signal1 benchmark to measure and compare performance [2]. 
The below results are from running a minimal kernel+initramfs QEMU/KVM guest on a POWER9 Blackbird: signal1_threads -t1 -s10 | | hash | radix | | --- | -- | -- | | linuxppc/next | 118693 | 133296 | | linuxppc/next w/o KUAP+KUEP | 228911 | 228654 | | unsafe-signal64 | 199443 | 234716 | [0]: https://github.com/linuxppc/issues/issues/277 [1]: https://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=196278 [2]: https://github.com/antonblanchard/will-it-scale/blob/master/tests/signal1.c v4: * Fix issues identified by Christophe Leroy (thanks for review) * Use __get_user() directly to copy the 8B sigset_t v3: * Rebase on latest linuxppc/next * Reword confusing commit messages * Add missing comma in macro in signal.h which broke compiles without CONFIG_ALTIVEC * Validate hash KUAP signal performance improvements v2: * Rebase on latest linuxppc/next + Christophe Leroy's PPC32 signal series * Simplify/remove TM ifdefery similar to PPC32 series and clean up the uaccess begin/end calls * Isolate non-inline functions so they are not called when uaccess window is open Christopher M. Riedl (8): powerpc/uaccess: Add unsafe_copy_from_user powerpc/signal: Add unsafe_copy_{vsx,fpr}_from_user() powerpc/signal64: Move non-inline functions out of setup_sigcontext() powerpc: Reference param in MSR_TM_ACTIVE() macro powerpc/signal64: Remove TM ifdefery in middle of if/else block powerpc/signal64: Replace setup_sigcontext() w/ unsafe_setup_sigcontext() powerpc/signal64: Replace restore_sigcontext() w/ unsafe_restore_sigcontext() powerpc/signal64: Use __get_user() to copy sigset_t Daniel Axtens (2): powerpc/signal64: Rewrite handle_rt_signal64() to minimise uaccess switches powerpc/signal64: Rewrite rt_sigreturn() to minimise uaccess switches arch/powerpc/include/asm/reg.h | 2 +- arch/powerpc/include/asm/uaccess.h | 3 + arch/powerpc/kernel/signal.h | 33 arch/powerpc/kernel/signal_64.c| 251 ++--- 4 files changed, 196 insertions(+), 93 deletions(-) -- 2.26.1
[PATCH v4 01/10] powerpc/uaccess: Add unsafe_copy_from_user
Just wrap __copy_tofrom_user() for the usual 'unsafe' pattern which takes in a label to goto on error. Signed-off-by: Christopher M. Riedl --- arch/powerpc/include/asm/uaccess.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 501c9a79038c..036e82eefac9 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -542,6 +542,9 @@ user_write_access_begin(const void __user *ptr, size_t len) #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) +#define unsafe_copy_from_user(d, s, l, e) \ + unsafe_op_wrap(__copy_tofrom_user((__force void __user *)d, s, l), e) + #define unsafe_copy_to_user(d, s, l, e) \ do { \ u8 __user *_dst = (u8 __user *)(d); \ -- 2.26.1
[PATCH v4 02/10] powerpc/signal: Add unsafe_copy_{vsx, fpr}_from_user()
Reuse the "safe" implementation from signal.c except for calling unsafe_copy_from_user() to copy into a local buffer. Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal.h | 33 + 1 file changed, 33 insertions(+) diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 2559a681536e..c18402d625f1 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -53,6 +53,33 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from); &buf[i], label);\ } while (0) +#define unsafe_copy_fpr_from_user(task, from, label) do {\ + struct task_struct *__t = task; \ + u64 __user *__f = (u64 __user *)from; \ + u64 buf[ELF_NFPREG];\ + int i; \ + \ + unsafe_copy_from_user(buf, __f, ELF_NFPREG * sizeof(double),\ + label); \ + for (i = 0; i < ELF_NFPREG - 1; i++)\ + __t->thread.TS_FPR(i) = buf[i]; \ + __t->thread.fp_state.fpscr = buf[i];\ +} while (0) + +#define unsafe_copy_vsx_from_user(task, from, label) do {\ + struct task_struct *__t = task; \ + u64 __user *__f = (u64 __user *)from; \ + u64 buf[ELF_NVSRHALFREG]; \ + int i; \ + \ + unsafe_copy_from_user(buf, __f, \ + ELF_NVSRHALFREG * sizeof(double), \ + label); \ + for (i = 0; i < ELF_NVSRHALFREG ; i++) \ + __t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; \ +} while (0) + + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM #define unsafe_copy_ckfpr_to_user(to, task, label) do {\ struct task_struct *__t = task; \ @@ -80,6 +107,10 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from); unsafe_copy_to_user(to, (task)->thread.fp_state.fpr,\ ELF_NFPREG * sizeof(double), label) +#define unsafe_copy_fpr_from_user(task, from, label) \ + unsafe_copy_from_user((task)->thread.fp_state.fpr, from,\ + ELF_NFPREG * sizeof(double), label) + static inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { @@ -115,6 +146,8 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from) #else #define 
unsafe_copy_fpr_to_user(to, task, label) do { } while (0) +#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0) + static inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { -- 2.26.1
[PATCH v4 09/10] powerpc/signal64: Rewrite rt_sigreturn() to minimise uaccess switches
From: Daniel Axtens Add uaccess blocks and use the 'unsafe' versions of functions doing user access where possible to reduce the number of times uaccess has to be opened/closed. Signed-off-by: Daniel Axtens Co-developed-by: Christopher M. Riedl Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 25 +++-- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index a471e97589a8..817b64e1e409 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -782,9 +782,13 @@ SYSCALL_DEFINE0(rt_sigreturn) * restore_tm_sigcontexts. */ regs->msr &= ~MSR_TS_MASK; +#endif - if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) + if (!user_read_access_begin(uc, sizeof(*uc))) goto badframe; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + unsafe_get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR], badframe_block); #endif if (MSR_TM_ACTIVE(msr)) { @@ -794,10 +798,12 @@ SYSCALL_DEFINE0(rt_sigreturn) /* Trying to start TM on non TM system */ if (!cpu_has_feature(CPU_FTR_TM)) - goto badframe; + goto badframe_block; + + unsafe_get_user(uc_transact, &uc->uc_link, badframe_block); + + user_read_access_end(); - if (__get_user(uc_transact, &uc->uc_link)) - goto badframe; if (restore_tm_sigcontexts(current, &uc->uc_mcontext, &uc_transact->uc_mcontext)) goto badframe; @@ -816,12 +822,9 @@ SYSCALL_DEFINE0(rt_sigreturn) * causing a TM bad thing. */ current->thread.regs->msr &= ~MSR_TS_MASK; - if (!user_read_access_begin(uc, sizeof(*uc))) - return -EFAULT; - if (__unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) { - user_read_access_end(); - goto badframe; - } + unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext, + badframe_block); + user_read_access_end(); } @@ -831,6 +834,8 @@ SYSCALL_DEFINE0(rt_sigreturn) set_thread_flag(TIF_RESTOREALL); return 0; +badframe_block: + user_read_access_end(); badframe: signal_fault(current, regs, "rt_sigreturn", uc); -- 2.26.1
[PATCH v4 04/10] powerpc: Reference param in MSR_TM_ACTIVE() macro
Unlike the other MSR_TM_* macros, MSR_TM_ACTIVE does not reference or use its parameter unless CONFIG_PPC_TRANSACTIONAL_MEM is defined. This causes an 'unused variable' compile warning unless the variable is also guarded with CONFIG_PPC_TRANSACTIONAL_MEM. Reference but do nothing with the argument in the macro to avoid a potential compile warning. Signed-off-by: Christopher M. Riedl --- arch/powerpc/include/asm/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e40a921d78f9..c5a3e856191c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -124,7 +124,7 @@ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ #else -#define MSR_TM_ACTIVE(x) 0 +#define MSR_TM_ACTIVE(x) ((void)(x), 0) #endif #if defined(CONFIG_PPC_BOOK3S_64) -- 2.26.1
[PATCH v4 10/10] powerpc/signal64: Use __get_user() to copy sigset_t
Usually sigset_t is exactly 8B which is a "trivial" size and does not warrant using __copy_from_user(). Use __get_user() directly in anticipation of future work to remove the trivial size optimizations from __copy_from_user(). Calling __get_user() also results in a small boost to signal handling throughput here. Signed-off-by: Christopher M. Riedl --- arch/powerpc/kernel/signal_64.c | 14 -- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 817b64e1e409..42fdc4a7ff72 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -97,6 +97,14 @@ static void prepare_setup_sigcontext(struct task_struct *tsk, int ctx_has_vsx_re #endif /* CONFIG_VSX */ } +static inline int get_user_sigset(sigset_t *dst, const sigset_t *src) +{ + if (sizeof(sigset_t) <= 8) + return __get_user(dst->sig[0], &src->sig[0]); + else + return __copy_from_user(dst, src, sizeof(sigset_t)); +} + /* * Set up the sigcontext for the signal frame. */ @@ -701,8 +709,9 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, * We kill the task with a SIGSEGV in this situation. */ - if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) + if (get_user_sigset(&set, &new_ctx->uc_sigmask)) do_exit(SIGSEGV); + set_current_blocked(&set); if (!user_read_access_begin(new_ctx, ctx_size)) @@ -740,8 +749,9 @@ SYSCALL_DEFINE0(rt_sigreturn) if (!access_ok(uc, sizeof(*uc))) goto badframe; - if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) + if (get_user_sigset(&set, &uc->uc_sigmask)) goto badframe; + set_current_blocked(&set); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -- 2.26.1
Re: [PATCH 2/2] powerpc/vdso64: remove meaningless vgettimeofday.o build rule
On Thu, Dec 24, 2020 at 2:12 AM Masahiro Yamada wrote: > > VDSO64 is only built for the 64-bit kernel, hence vgettimeofday.o is > built by the generic rule in scripts/Makefile.build. > > This line does not provide anything useful. > > Signed-off-by: Masahiro Yamada Michael, please take a look at this too. > --- > > arch/powerpc/kernel/vdso64/Makefile | 2 -- > 1 file changed, 2 deletions(-) > > diff --git a/arch/powerpc/kernel/vdso64/Makefile > b/arch/powerpc/kernel/vdso64/Makefile > index b50b39fedf74..422addf394c7 100644 > --- a/arch/powerpc/kernel/vdso64/Makefile > +++ b/arch/powerpc/kernel/vdso64/Makefile > @@ -32,8 +32,6 @@ asflags-y := -D__VDSO64__ -s > targets += vdso64.lds > CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) > > -$(obj)/vgettimeofday.o: %.o: %.c FORCE > - > # link rule for the .so file, .lds has to be first > $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o > FORCE > $(call if_changed,vdso64ld_and_check) > -- > 2.27.0 > -- Best Regards Masahiro Yamada
Re: [PATCH 1/2] powerpc/vdso: fix unnecessary rebuilds of vgettimeofday.o
On Thu, Dec 24, 2020 at 2:12 AM Masahiro Yamada wrote: > > vgettimeofday.o is unnecessarily rebuilt. Adding it to 'targets' is not > enough to fix the issue. Kbuild is correctly rebuilding it because the > command line is changed. > > PowerPC builds each vdso directory twice; first in vdso_prepare to > generate vdso{32,64}-offsets.h, second as part of the ordinary build > process to embed vdso{32,64}.so.dbg into the kernel. > > The problem shows up when CONFIG_PPC_WERROR=y due to the following line > in arch/powerpc/Kbuild: > > subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror > > In the preparation stage, Kbuild directly visits the vdso directories, > hence it does not inherit subdir-ccflags-y. In the second descend, > Kbuild adds -Werror, which results in the command line flipping > with/without -Werror. > > It implies a potential danger; if a more critical flag that would impact > the resulted vdso, the offsets recorded in the headers might be different > from real offsets in the embedded vdso images. > > Removing the unneeded second descend solves the problem. > > Link: https://lore.kernel.org/linuxppc-dev/87tuslxhry@mpe.ellerman.id.au/ > Reported-by: Michael Ellerman > Signed-off-by: Masahiro Yamada > --- Michael, please take a look at this. The unneeded rebuild problem is still remaining. 
> > arch/powerpc/kernel/Makefile | 4 ++-- > arch/powerpc/kernel/vdso32/Makefile | 5 + > arch/powerpc/kernel/{vdso32 => }/vdso32_wrapper.S | 0 > arch/powerpc/kernel/vdso64/Makefile | 6 +- > arch/powerpc/kernel/{vdso64 => }/vdso64_wrapper.S | 0 > 5 files changed, 4 insertions(+), 11 deletions(-) > rename arch/powerpc/kernel/{vdso32 => }/vdso32_wrapper.S (100%) > rename arch/powerpc/kernel/{vdso64 => }/vdso64_wrapper.S (100%) > > diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile > index fe2ef598e2ea..79ee7750937d 100644 > --- a/arch/powerpc/kernel/Makefile > +++ b/arch/powerpc/kernel/Makefile > @@ -51,7 +51,7 @@ obj-y += ptrace/ > obj-$(CONFIG_PPC64)+= setup_64.o \ >paca.o nvram_64.o note.o syscall_64.o > obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o > -obj-$(CONFIG_VDSO32) += vdso32/ > +obj-$(CONFIG_VDSO32) += vdso32_wrapper.o > obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o > obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o > obj-$(CONFIG_PPC_DAWR) += dawr.o > @@ -60,7 +60,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o > obj-$(CONFIG_PPC_BOOK3S_64)+= mce.o mce_power.o > obj-$(CONFIG_PPC_BOOK3E_64)+= exceptions-64e.o idle_book3e.o > obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o > -obj-$(CONFIG_PPC64)+= vdso64/ > +obj-$(CONFIG_PPC64)+= vdso64_wrapper.o > obj-$(CONFIG_ALTIVEC) += vecemu.o > obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o > procfs-y := proc_powerpc.o > diff --git a/arch/powerpc/kernel/vdso32/Makefile > b/arch/powerpc/kernel/vdso32/Makefile > index 59aa2944ecae..42fc3de89b39 100644 > --- a/arch/powerpc/kernel/vdso32/Makefile > +++ b/arch/powerpc/kernel/vdso32/Makefile > @@ -30,7 +30,7 @@ CC32FLAGS += -m32 > KBUILD_CFLAGS := $(filter-out -mcmodel=medium,$(KBUILD_CFLAGS)) > endif > > -targets := $(obj-vdso32) vdso32.so.dbg > +targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o > obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) > > GCOV_PROFILE := n > @@ -46,9 +46,6 @@ obj-y += vdso32_wrapper.o > targets += vdso32.lds > 
CPPFLAGS_vdso32.lds += -P -C -Upowerpc > > -# Force dependency (incbin is bad) > -$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg > - > # link rule for the .so file, .lds has to be first > $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o > FORCE > $(call if_changed,vdso32ld_and_check) > diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S > b/arch/powerpc/kernel/vdso32_wrapper.S > similarity index 100% > rename from arch/powerpc/kernel/vdso32/vdso32_wrapper.S > rename to arch/powerpc/kernel/vdso32_wrapper.S > diff --git a/arch/powerpc/kernel/vdso64/Makefile > b/arch/powerpc/kernel/vdso64/Makefile > index d365810a689a..b50b39fedf74 100644 > --- a/arch/powerpc/kernel/vdso64/Makefile > +++ b/arch/powerpc/kernel/vdso64/Makefile > @@ -17,7 +17,7 @@ endif > > # Build rules > > -targets := $(obj-vdso64) vdso64.so.dbg > +targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o > obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) > > GCOV_PROFILE := n > @@ -29,15 +29,11 @@ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \ > -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both > asflags-y := -D__VDSO64__ -s > > -obj-y += vdso64_wrapper.o > targets += vdso64.lds > CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) > > $(obj)/vgettimeofday.o: %.o: %.c FORCE > > -# Force dependency (incbin is bad) > -$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
On 1/27/21 7:52 PM, Thiago Jung Bauermann wrote: Will Deacon writes: On Wed, Jan 27, 2021 at 09:59:38AM -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 8:52 AM, Will Deacon wrote: Hi Will, On Fri, Jan 15, 2021 at 09:30:16AM -0800, Lakshmi Ramasubramanian wrote: create_dtb() function allocates kernel virtual memory for the device tree blob (DTB). This is not consistent with other architectures, such as powerpc, which calls kmalloc() for allocating memory for the DTB. Call kmalloc() to allocate memory for the DTB, and kfree() to free the allocated memory. Co-developed-by: Prakhar Srivastava Signed-off-by: Prakhar Srivastava Signed-off-by: Lakshmi Ramasubramanian --- arch/arm64/kernel/machine_kexec_file.c | 12 +++- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 7de9c47dee7c..51c40143d6fa 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -29,7 +29,7 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { int arch_kimage_file_post_load_cleanup(struct kimage *image) { - vfree(image->arch.dtb); + kfree(image->arch.dtb); image->arch.dtb = NULL; vfree(image->arch.elf_headers); @@ -59,19 +59,21 @@ static int create_dtb(struct kimage *image, + cmdline_len + DTB_EXTRA_SPACE; for (;;) { - buf = vmalloc(buf_size); + buf = kmalloc(buf_size, GFP_KERNEL); Is there a functional need for this patch? I build the 'dtbs' target just now and sdm845-db845c.dtb is approaching 100K, which feels quite large for kmalloc(). Changing the allocation from vmalloc() to kmalloc() would help us further consolidate the DTB setup code for powerpc and arm64. Ok, but at the risk of allocation failure. Can powerpc use vmalloc() instead? I believe this patch stems from this suggestion by Rob Herring: This could be taken a step further and do the allocation of the new FDT. The difference is arm64 uses vmalloc and powerpc uses kmalloc. 
The arm64 version also retries with a bigger allocation. That seems unnecessary. in https://lore.kernel.org/linux-integrity/20201211221006.1052453-3-r...@kernel.org/ The problem is that this patch implements only part of the suggestion, which isn't useful in itself. So the patch series should either drop this patch or consolidate the FDT allocation between the arches. I just tested on powernv and pseries platforms and powerpc can use vmalloc for the FDT buffer. Thanks for verifying on powerpc platform Thiago. I'll update the patch to do the following: => Use vmalloc for FDT buffer allocation on powerpc => Keep vmalloc for arm64, but remove the retry on allocation. => Also, there was a memory leak of FDT buffer in the error code path on arm64, which I'll fix as well. Did I miss anything? thanks, -lakshmi
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
Will Deacon writes: > On Wed, Jan 27, 2021 at 09:59:38AM -0800, Lakshmi Ramasubramanian wrote: >> On 1/27/21 8:52 AM, Will Deacon wrote: >> >> Hi Will, >> >> > On Fri, Jan 15, 2021 at 09:30:16AM -0800, Lakshmi Ramasubramanian wrote: >> > > create_dtb() function allocates kernel virtual memory for >> > > the device tree blob (DTB). This is not consistent with other >> > > architectures, such as powerpc, which calls kmalloc() for allocating >> > > memory for the DTB. >> > > >> > > Call kmalloc() to allocate memory for the DTB, and kfree() to free >> > > the allocated memory. >> > > >> > > Co-developed-by: Prakhar Srivastava >> > > Signed-off-by: Prakhar Srivastava >> > > Signed-off-by: Lakshmi Ramasubramanian >> > > --- >> > > arch/arm64/kernel/machine_kexec_file.c | 12 +++- >> > > 1 file changed, 7 insertions(+), 5 deletions(-) >> > > >> > > diff --git a/arch/arm64/kernel/machine_kexec_file.c >> > > b/arch/arm64/kernel/machine_kexec_file.c >> > > index 7de9c47dee7c..51c40143d6fa 100644 >> > > --- a/arch/arm64/kernel/machine_kexec_file.c >> > > +++ b/arch/arm64/kernel/machine_kexec_file.c >> > > @@ -29,7 +29,7 @@ const struct kexec_file_ops * const >> > > kexec_file_loaders[] = { >> > > int arch_kimage_file_post_load_cleanup(struct kimage *image) >> > > { >> > > -vfree(image->arch.dtb); >> > > +kfree(image->arch.dtb); >> > > image->arch.dtb = NULL; >> > > vfree(image->arch.elf_headers); >> > > @@ -59,19 +59,21 @@ static int create_dtb(struct kimage *image, >> > > + cmdline_len + DTB_EXTRA_SPACE; >> > > for (;;) { >> > > -buf = vmalloc(buf_size); >> > > +buf = kmalloc(buf_size, GFP_KERNEL); >> > >> > Is there a functional need for this patch? I build the 'dtbs' target just >> > now and sdm845-db845c.dtb is approaching 100K, which feels quite large >> > for kmalloc(). >> >> Changing the allocation from vmalloc() to kmalloc() would help us further >> consolidate the DTB setup code for powerpc and arm64. > > Ok, but at the risk of allocation failure. 
Can powerpc use vmalloc() > instead? I believe this patch stems from this suggestion by Rob Herring: > This could be taken a step further and do the allocation of the new > FDT. The difference is arm64 uses vmalloc and powerpc uses kmalloc. The > arm64 version also retries with a bigger allocation. That seems > unnecessary. in https://lore.kernel.org/linux-integrity/20201211221006.1052453-3-r...@kernel.org/ The problem is that this patch implements only part of the suggestion, which isn't useful in itself. So the patch series should either drop this patch or consolidate the FDT allocation between the arches. I just tested on powernv and pseries platforms and powerpc can use vmalloc for the FDT buffer. -- Thiago Jung Bauermann IBM Linux Technology Center
Re: [PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page
On 2021/1/26 12:44, Nicholas Piggin wrote: > vmalloc_to_page returns NULL for addresses mapped by larger pages[*]. > Whether or not a vmap is huge depends on the architecture details, > alignments, boot options, etc., which the caller can not be expected > to know. Therefore HUGE_VMAP is a regression for vmalloc_to_page. > > This change teaches vmalloc_to_page about larger pages, and returns > the struct page that corresponds to the offset within the large page. > This makes the API agnostic to mapping implementation details. > > [*] As explained by commit 029c54b095995 ("mm/vmalloc.c: huge-vmap: > fail gracefully on unexpected huge vmap mappings") > > Reviewed-by: Christoph Hellwig > Signed-off-by: Nicholas Piggin > --- > mm/vmalloc.c | 41 ++--- > 1 file changed, 26 insertions(+), 15 deletions(-) > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index e6f352bf0498..62372f9e0167 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -34,7 +34,7 @@ > #include > #include > #include > - > +#include > #include > #include > #include > @@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x) > } > > /* > - * Walk a vmap address to the struct page it maps. > + * Walk a vmap address to the struct page it maps. Huge vmap mappings will > + * return the tail page that corresponds to the base page address, which > + * matches small vmap mappings. 
> */ > struct page *vmalloc_to_page(const void *vmalloc_addr) > { > @@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) > > if (pgd_none(*pgd)) > return NULL; > + if (WARN_ON_ONCE(pgd_leaf(*pgd))) > + return NULL; /* XXX: no allowance for huge pgd */ > + if (WARN_ON_ONCE(pgd_bad(*pgd))) > + return NULL; > + > p4d = p4d_offset(pgd, addr); > if (p4d_none(*p4d)) > return NULL; > - pud = pud_offset(p4d, addr); > + if (p4d_leaf(*p4d)) > + return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); > + if (WARN_ON_ONCE(p4d_bad(*p4d))) > + return NULL; > > - /* > - * Don't dereference bad PUD or PMD (below) entries. This will also > - * identify huge mappings, which we may encounter on architectures > - * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be > - * identified as vmalloc addresses by is_vmalloc_addr(), but are > - * not [unambiguously] associated with a struct page, so there is > - * no correct value to return for them. > - */ > - WARN_ON_ONCE(pud_bad(*pud)); > - if (pud_none(*pud) || pud_bad(*pud)) > + pud = pud_offset(p4d, addr); > + if (pud_none(*pud)) > + return NULL; > + if (pud_leaf(*pud)) > + return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); Hi Nicho: /builds/1mzfdQzleCy69KZFb5qHNSEgabZ/mm/vmalloc.c: In function 'vmalloc_to_page': /builds/1mzfdQzleCy69KZFb5qHNSEgabZ/include/asm-generic/pgtable-nop4d-hack.h:48:27: error: implicit declaration of function 'pud_page'; did you mean 'put_page'? [-Werror=implicit-function-declaration] 48 | #define pgd_page(pgd)(pud_page((pud_t){ pgd })) | ^~~~ pud_page is not defined for aarch32 when the 2-level page table config is enabled; it breaks the system build. 
> + if (WARN_ON_ONCE(pud_bad(*pud))) > return NULL; > + > pmd = pmd_offset(pud, addr); > - WARN_ON_ONCE(pmd_bad(*pmd)); > - if (pmd_none(*pmd) || pmd_bad(*pmd)) > + if (pmd_none(*pmd)) > + return NULL; > + if (pmd_leaf(*pmd)) > + return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); > + if (WARN_ON_ONCE(pmd_bad(*pmd))) > return NULL; > > ptep = pte_offset_map(pmd, addr); > @@ -389,6 +399,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) > if (pte_present(pte)) > page = pte_page(pte); > pte_unmap(ptep); > + > return page; > } > EXPORT_SYMBOL(vmalloc_to_page); >
Re: [PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
On 1/27/21 8:13 PM, Zorro Lang wrote: > On Thu, Jan 28, 2021 at 10:18:07AM +1000, Nicholas Piggin wrote: >> Excerpts from Jens Axboe's message of January 28, 2021 5:29 am: >>> On 1/27/21 9:38 AM, Christophe Leroy wrote: Le 27/01/2021 à 15:56, Zorro Lang a écrit : > On powerpc, io_uring test hit below KUAP fault on __do_page_fault. > The fail source line is: > >if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, > is_write))) >return SIGSEGV; > > The is_user() is based on user_mod(regs) only. This's not suit for > io_uring, where the helper thread can assume the user app identity > and could perform this fault just fine. So turn to use mm to decide > if this is valid or not. I don't understand why testing is_user would be an issue. KUAP purpose it to block any unallowed access from kernel to user memory (Equivalent to SMAP on x86). So it really must be based on MSR_PR bit, that is what is_user provides. If the kernel access is legitimate, kernel should have opened userspace access then you shouldn't get this "Bug: Read fault blocked by KUAP!". As far as I understand, the fault occurs in iov_iter_fault_in_readable() which calls fault_in_pages_readable() And fault_in_pages_readable() uses __get_user() so it is a legitimate access and you really should get a KUAP fault. So the problem is somewhere else, I think you proposed patch just hides the problem, it doesn't fix it. >>> >>> If we do kthread_use_mm(), can we agree that the user access is valid? >> >> Yeah the io uring code is fine, provided it uses the uaccess primitives >> like any other kernel code. It's looking more like a an arch/powerpc bug. >> >>> We should be able to copy to/from user space, and including faults, if >>> that's been done and the new mm assigned. Because it really should be. >>> If SMAP was a problem on x86, we would have seen it long ago. >>> >>> I'm assuming this may be breakage related to the recent uaccess changes >>> related to set_fs and friends? 
Or maybe recent changes on the powerpc >>> side? >>> >>> Zorro, did 5.10 work? >> >> Would be interesting to know. > > Sure Nick and Jens, which 5.10 rc? version do you want to know ? Or any git > commit(be the HEAD) in 5.10 phase? I forget which versions had what series of this, but 5.10 final - and if that fails, then 5.9 final. IIRC, 5.9 was pre any of these changes, and 5.10 definitely has them. -- Jens Axboe
Re: [PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
On Thu, Jan 28, 2021 at 10:18:07AM +1000, Nicholas Piggin wrote: > Excerpts from Jens Axboe's message of January 28, 2021 5:29 am: > > On 1/27/21 9:38 AM, Christophe Leroy wrote: > >> > >> > >> Le 27/01/2021 à 15:56, Zorro Lang a écrit : > >>> On powerpc, io_uring test hit below KUAP fault on __do_page_fault. > >>> The fail source line is: > >>> > >>>if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, > >>> is_write))) > >>>return SIGSEGV; > >>> > >>> The is_user() is based on user_mod(regs) only. This's not suit for > >>> io_uring, where the helper thread can assume the user app identity > >>> and could perform this fault just fine. So turn to use mm to decide > >>> if this is valid or not. > >> > >> I don't understand why testing is_user would be an issue. KUAP purpose > >> it to block any unallowed access from kernel to user memory > >> (Equivalent to SMAP on x86). So it really must be based on MSR_PR bit, > >> that is what is_user provides. > >> > >> If the kernel access is legitimate, kernel should have opened > >> userspace access then you shouldn't get this "Bug: Read fault blocked > >> by KUAP!". > >> > >> As far as I understand, the fault occurs in > >> iov_iter_fault_in_readable() which calls fault_in_pages_readable() And > >> fault_in_pages_readable() uses __get_user() so it is a legitimate > >> access and you really should get a KUAP fault. > >> > >> So the problem is somewhere else, I think you proposed patch just > >> hides the problem, it doesn't fix it. > > > > If we do kthread_use_mm(), can we agree that the user access is valid? > > Yeah the io uring code is fine, provided it uses the uaccess primitives > like any other kernel code. It's looking more like a an arch/powerpc bug. > > > We should be able to copy to/from user space, and including faults, if > > that's been done and the new mm assigned. Because it really should be. > > If SMAP was a problem on x86, we would have seen it long ago. 
> > > > I'm assuming this may be breakage related to the recent uaccess changes > > related to set_fs and friends? Or maybe recent changes on the powerpc > > side? > > > > Zorro, did 5.10 work? > > Would be interesting to know. Sure Nick and Jens, which 5.10 rc? version do you want to know ? Or any git commit(be the HEAD) in 5.10 phase? Thanks, Zorro > > Thanks, > Nick >
Re: [PATCH v11 04/13] mm/ioremap: rename ioremap_*_range to vmap_*_range
Hi: On 2021/1/26 12:45, Nicholas Piggin wrote: > This will be used as a generic kernel virtual mapping function, so > re-name it in preparation. > Looks good to me. Thanks. Reviewed-by: Miaohe Lin > Signed-off-by: Nicholas Piggin > --- > mm/ioremap.c | 64 +++- > 1 file changed, 33 insertions(+), 31 deletions(-) > > diff --git a/mm/ioremap.c b/mm/ioremap.c > index 5fa1ab41d152..3f4d36f9745a 100644 > --- a/mm/ioremap.c > +++ b/mm/ioremap.c > @@ -61,9 +61,9 @@ static inline int ioremap_pud_enabled(void) { return 0; } > static inline int ioremap_pmd_enabled(void) { return 0; } > #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ > > -static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, pgprot_t prot, > - pgtbl_mod_mask *mask) > +static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, > + phys_addr_t phys_addr, pgprot_t prot, > + pgtbl_mod_mask *mask) > { > pte_t *pte; > u64 pfn; > @@ -81,9 +81,8 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, > return 0; > } > > -static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, > - pgprot_t prot) > +static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long > end, > + phys_addr_t phys_addr, pgprot_t prot) > { > if (!ioremap_pmd_enabled()) > return 0; > @@ -103,9 +102,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long > addr, > return pmd_set_huge(pmd, phys_addr, prot); > } > > -static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, pgprot_t prot, > - pgtbl_mod_mask *mask) > +static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, > + phys_addr_t phys_addr, pgprot_t prot, > + pgtbl_mod_mask *mask) > { > pmd_t *pmd; > unsigned long next; > @@ -116,20 +115,19 @@ static inline int ioremap_pmd_range(pud_t *pud, > unsigned long addr, > do { > next = pmd_addr_end(addr, end); > > - if 
(ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { > + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { > *mask |= PGTBL_PMD_MODIFIED; > continue; > } > > - if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask)) > + if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask)) > return -ENOMEM; > } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); > return 0; > } > > -static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, > - pgprot_t prot) > +static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long > end, > + phys_addr_t phys_addr, pgprot_t prot) > { > if (!ioremap_pud_enabled()) > return 0; > @@ -149,9 +147,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long > addr, > return pud_set_huge(pud, phys_addr, prot); > } > > -static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, pgprot_t prot, > - pgtbl_mod_mask *mask) > +static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, > + phys_addr_t phys_addr, pgprot_t prot, > + pgtbl_mod_mask *mask) > { > pud_t *pud; > unsigned long next; > @@ -162,20 +160,19 @@ static inline int ioremap_pud_range(p4d_t *p4d, > unsigned long addr, > do { > next = pud_addr_end(addr, end); > > - if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) { > + if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) { > *mask |= PGTBL_PUD_MODIFIED; > continue; > } > > - if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask)) > + if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask)) > return -ENOMEM; > } while (pud++, phys_addr += (next - addr), addr = next, addr != end); > return 0; > } > > -static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, > - unsigned long end, phys_addr_t phys_addr, > - pgprot_t prot) > +static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long > end, > + phys_addr_t phys_addr, 
pgprot_t prot) > { > if (!ioremap_p4d_enabled()) > return
Re: [PATCH 01/27] scripts: add generic syscalltbl.sh
On Thu, Jan 28, 2021 at 9:51 AM Masahiro Yamada wrote: > > Most of architectures generate syscall headers at the compile time > in the almost same way. > > The syscall table has the same format for all architectures. Each line > has 3, 4 or 5 fields; syscall number, ABI, syscall name, native entry > point, and compat entry point. The syscall table is processed by > syscalltbl.sh script into header files. > > Despite the same pattern, scripts are maintained per architecture, > which results in code duplication and bad maintainability. > > As of v5.11-rc1, 12 architectures duplicate similar shell scripts: > > $ find arch -name syscalltbl.sh | sort > arch/alpha/kernel/syscalls/syscalltbl.sh > arch/arm/tools/syscalltbl.sh > arch/ia64/kernel/syscalls/syscalltbl.sh > arch/m68k/kernel/syscalls/syscalltbl.sh > arch/microblaze/kernel/syscalls/syscalltbl.sh > arch/mips/kernel/syscalls/syscalltbl.sh > arch/parisc/kernel/syscalls/syscalltbl.sh > arch/powerpc/kernel/syscalls/syscalltbl.sh > arch/sh/kernel/syscalls/syscalltbl.sh > arch/sparc/kernel/syscalls/syscalltbl.sh > arch/x86/entry/syscalls/syscalltbl.sh > arch/xtensa/kernel/syscalls/syscalltbl.sh > > My goal is to unify them into a single file, scripts/syscalltbl.sh. > > For example, the i386 syscall table looks like this: > > 0 i386 restart_syscall sys_restart_syscall > 1 i386 exit sys_exit > 2 i386 fork sys_fork > 3 i386 read sys_read > 4 i386 writesys_write > 5 i386 open sys_open compat_sys_open > ... > > scripts/syscalltbl.sh generates the following code: > > __SYSCALL(0, sys_restart_syscall) > __SYSCALL(1, sys_exit) > __SYSCALL(2, sys_fork) > __SYSCALL(3, sys_read) > __SYSCALL(4, sys_write) > __SYSCALL_WITH_COMPAT(5, sys_open, compat_sys_open) > ... 
> > Then, the i386 kernel will do: > > #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) > > and the x86_64 kernel will do: > > #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) > > I noticed all 32/64 bit architectures can be covered by the same > pattern. Having an arch-specific script is fine if there is a good > reason to do so, but a single generic script should work for this case. > > Signed-off-by: Masahiro Yamada > --- > > scripts/syscalltbl.sh | 52 +++ > 1 file changed, 52 insertions(+) > create mode 100644 scripts/syscalltbl.sh > > diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh > new file mode 100644 > index ..15bf4e09f88c > --- /dev/null > +++ b/scripts/syscalltbl.sh > @@ -0,0 +1,52 @@ > +#!/bin/sh > +# SPDX-License-Identifier: GPL-2.0-only > +# > +# Usage: > +# scripts/syscalltbl.sh INFILE OUTFILE [ABIS] [OFFSET] > +# > +# INFILE: input syscall table > +# OUTFILE: output file > +# ABIS (optional): specify the ABIs to handle. > +# If omitted, all lines are handled. > +# OFFSET (optional): specify the offset of the syscall numbers. > +# If omitted, the offset is zero. > +# > +# The syscall table format: > +# nr abi name native [compat] This line should be nr abi name [native] [compat] because the native entry point is also optional. 
(if it is missing, sys_ni_syscall is used) > +# > +# nr: syscall number > +# abi: ABI name > +# name: syscall name > +# native: native entry point native (optional): native entry point > +# compat (optional): compat entry point > + > +set -e > + > +in="$1" > +out="$2" > +abis=$(echo "($3)" | tr ',' '|') > +offset="${4:-0}" > + > +nxt=$offset > + > +grep -E "^[0-9]+[[:space:]]+${abis}" "$in" | sort -n | { > + > + while read nr abi name native compat ; do > + > + nr=$((nr + $offset)) > + > + while [ $nxt -lt $nr ]; do > + echo "__SYSCALL($nxt, sys_ni_syscall)" > + nxt=$((nxt + 1)) > + done > + > + if [ -n "$compat" ]; then > + echo "__SYSCALL_WITH_COMPAT($nr, $native, $compat)" > + elif [ -n "$native" ]; then > + echo "__SYSCALL($nr, $native)" > + else > + echo "__SYSCALL($nr, sys_ni_syscall)" > + fi > + nxt=$((nr + 1)) > + done > +} > "$out" > -- > 2.27.0 > -- Best Regards Masahiro Yamada
Re: [PATCH 02/27] x86/syscalls: fix -Wmissing-prototypes warnings from COND_SYSCALL()
On Thu, Jan 28, 2021 at 9:52 AM Masahiro Yamada wrote: > > Building kernel/sys_ni.c with W=1 omits tons of -Wmissing-prototypes This is a typo. "omits" -> "emits" > warnings. > > $ make W=1 kernel/sys_ni.o > [ snip ] > CC kernel/sys_ni.o > In file included from kernel/sys_ni.c:10: > ./arch/x86/include/asm/syscall_wrapper.h:83:14: warning: no previous > prototype for '__x64_sys_io_setup' [-Wmissing-prototypes] >83 | __weak long __##abi##_##name(const struct pt_regs *__unused) \ > | ^~ > ./arch/x86/include/asm/syscall_wrapper.h:100:2: note: in expansion of macro > '__COND_SYSCALL' > 100 | __COND_SYSCALL(x64, sys_##name) > | ^~ > ./arch/x86/include/asm/syscall_wrapper.h:256:2: note: in expansion of macro > '__X64_COND_SYSCALL' > 256 | __X64_COND_SYSCALL(name) \ > | ^~ > kernel/sys_ni.c:39:1: note: in expansion of macro 'COND_SYSCALL' >39 | COND_SYSCALL(io_setup); > | ^~~~ > ./arch/x86/include/asm/syscall_wrapper.h:83:14: warning: no previous > prototype for '__ia32_sys_io_setup' [-Wmissing-prototypes] >83 | __weak long __##abi##_##name(const struct pt_regs *__unused) \ > | ^~ > ./arch/x86/include/asm/syscall_wrapper.h:120:2: note: in expansion of macro > '__COND_SYSCALL' > 120 | __COND_SYSCALL(ia32, sys_##name) > | ^~ > ./arch/x86/include/asm/syscall_wrapper.h:257:2: note: in expansion of macro > '__IA32_COND_SYSCALL' > 257 | __IA32_COND_SYSCALL(name) > | ^~~ > kernel/sys_ni.c:39:1: note: in expansion of macro 'COND_SYSCALL' >39 | COND_SYSCALL(io_setup); > | ^~~~ > ... > > __SYS_STUB0() and __SYS_STUBx() defined a few lines above have forward > declarations. Let's do likewise for __COND_SYSCALL() to fix the > warnings. 
> > Signed-off-by: Masahiro Yamada > --- > > arch/x86/include/asm/syscall_wrapper.h | 1 + > 1 file changed, 1 insertion(+) > > diff --git a/arch/x86/include/asm/syscall_wrapper.h > b/arch/x86/include/asm/syscall_wrapper.h > index a84333adeef2..80c08c7d5e72 100644 > --- a/arch/x86/include/asm/syscall_wrapper.h > +++ b/arch/x86/include/asm/syscall_wrapper.h > @@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs > *regs); > } > > #define __COND_SYSCALL(abi, name) \ > + __weak long __##abi##_##name(const struct pt_regs *__unused); \ > __weak long __##abi##_##name(const struct pt_regs *__unused)\ > { \ > return sys_ni_syscall();\ > -- > 2.27.0 > -- Best Regards Masahiro Yamada
[PATCH 20/27] sh: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts sh to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/sh/kernel/syscalls/Makefile | 7 ++ arch/sh/kernel/syscalls/syscalltbl.sh | 32 --- 2 files changed, 2 insertions(+), 37 deletions(-) delete mode 100644 arch/sh/kernel/syscalls/syscalltbl.sh diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile index 1c42d2d2926d..6610130c67bc 100644 --- a/arch/sh/kernel/syscalls/Makefile +++ b/arch/sh/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 904b8e6e625d.. 
--- a/arch/sh/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" -- 2.27.0
[PATCH 16/27] mips: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts mips to use scripts/syscalltbl.sh. This also unifies syscall_table_32_o32.h and syscall_table_64_o32.h into syscall_table_o32.h. Signed-off-by: Masahiro Yamada --- arch/mips/include/asm/Kbuild| 7 +++-- arch/mips/kernel/scall32-o32.S | 4 +-- arch/mips/kernel/scall64-n32.S | 3 +-- arch/mips/kernel/scall64-n64.S | 3 +-- arch/mips/kernel/scall64-o32.S | 4 +-- arch/mips/kernel/syscalls/Makefile | 34 --- arch/mips/kernel/syscalls/syscalltbl.sh | 36 - 7 files changed, 20 insertions(+), 71 deletions(-) delete mode 100644 arch/mips/kernel/syscalls/syscalltbl.sh diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 95b4fa7bd0d1..70b15857369d 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,9 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # MIPS headers -generated-y += syscall_table_32_o32.h -generated-y += syscall_table_64_n32.h -generated-y += syscall_table_64_n64.h -generated-y += syscall_table_64_o32.h +generated-y += syscall_table_n32.h +generated-y += syscall_table_n64.h +generated-y += syscall_table_o32.h generic-y += export.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index b449b68662a9..84e8624e83a2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -217,9 +217,9 @@ einval: li v0, -ENOSYS #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) PTR entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 35d8c86b160e..f650c55a17dc 100644 
--- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -104,5 +104,4 @@ not_n32_scall: #define __SYSCALL(nr, entry) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index 23b2e2b1609c..c71c13f9fcbc 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -113,5 +113,4 @@ illegal_syscall: .align 3 .type sys_call_table, @object EXPORT(sys_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 50c9a57e0d3a..cedc8bd88804 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -213,9 +213,9 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL(nr, entry) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index f15842bda464..265dab4253ab 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -10,7 +10,7 @@ syscalln64 := $(srctree)/$(src)/syscall_n64.tbl syscallo32 := $(srctree)/$(src)/syscall_o32.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -25,10 +25,7 @@ quiet_cmd_sysnr = SYSNR $@ '$(sysnr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ "" $(offset) syshdr_offset_unistd_n32 := __NR_Linux 
$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE @@ -57,24 +54,16 @@ sysnr_offset_unistd_nr_o32 := 4000 $(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_syscall_table_32_o32 := 32_o32 -systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_n32.h: offset := 6000 +$(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n32 := 64_n32 -systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(
[PATCH 15/27] mips: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/mips/kernel/syscalls/Makefile | 27 ++- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 6efb2f6889a7..f15842bda464 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -31,50 +31,50 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_offset_unistd_n32 := __NR_Linux -$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) +$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_offset_unistd_n64 := __NR_Linux -$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) +$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_offset_unistd_o32 := __NR_Linux -$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) +$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE $(call if_changed,syshdr) sysnr_pfx_unistd_nr_n32 := N32 sysnr_offset_unistd_nr_n32 := 6000 -$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) +$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) FORCE $(call if_changed,sysnr) sysnr_pfx_unistd_nr_n64 := 64 sysnr_offset_unistd_nr_n64 := 5000 -$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) +$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) FORCE $(call if_changed,sysnr) sysnr_pfx_unistd_nr_o32 := O32 sysnr_offset_unistd_nr_o32 := 4000 -$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) +$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) systbl_abi_syscall_table_32_o32 := 32_o32 systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) 
+$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_n32 := 64_n32 systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) +$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_n64 := 64_n64 systbl_offset_syscall_table_64_n64 := 5000 -$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) +$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE $(call if_changed,systbl) systbl_abi_syscall_table_64_o32 := 64_o32 systbl_offset_syscall_table_64_o32 := 4000 -$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) +$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_n32.h \ @@ -88,9 +88,10 @@ kapisyshdr-y += syscall_table_32_o32.h \ syscall_table_64_n64.h \ syscall_table_64_o32.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 25/27] powerpc: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts powerpc to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/powerpc/include/asm/Kbuild | 1 - arch/powerpc/kernel/syscalls/Makefile | 22 +++-- arch/powerpc/kernel/syscalls/syscalltbl.sh | 36 - arch/powerpc/kernel/systbl.S| 5 ++- arch/powerpc/platforms/cell/spu_callbacks.c | 2 +- 5 files changed, 10 insertions(+), 56 deletions(-) delete mode 100644 arch/powerpc/kernel/syscalls/syscalltbl.sh diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index e1f9b4ea1c53..bcf95ce0964f 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generated-y += syscall_table_spu.h generic-y += export.h generic-y += kvm_types.h diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile index d609f0040b2a..ecf1474f8ea5 100644 --- a/arch/powerpc/kernel/syscalls/Makefile +++ b/arch/powerpc/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(abis) syshdr_abis_unistd_32 := common,nospu,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE @@ 
-29,30 +26,21 @@ syshdr_abis_unistd_64 := common,nospu,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,nospu,32 -systbl_abi_syscall_table_32 := 32 +$(kapi)/syscall_table_32.h: abis := common,nospu,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,nospu,64 -systbl_abi_syscall_table_64 := 64 +$(kapi)/syscall_table_64.h: abis := common,nospu,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,nospu,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - -systbl_abis_syscall_table_spu := common,spu -systbl_abi_syscall_table_spu := spu +$(kapi)/syscall_table_spu.h: abis := common,spu $(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ - syscall_table_c32.h \ syscall_table_spu.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index f7393a7b18aa.. --- a/arch/powerpc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! 
-z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index d34276f3c495..cb3358886203 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -21,6 +21,7 @@ #define __SYSCALL(nr, entry) .long entry #endif +#defi
[PATCH 22/27] sparc: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/sparc/kernel/syscalls/Makefile | 17 + 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile index c22a21c39f30..556fe30a6c8f 100644 --- a/arch/sparc/kernel/syscalls/Makefile +++ b/arch/sparc/kernel/syscalls/Makefile @@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h @@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ syscall_table_c32.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) 
PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 26/27] xtensa: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/xtensa/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile index 659faefdcb1d..1c42d2d2926d 100644 --- a/arch/xtensa/kernel/syscalls/Makefile +++ b/arch/xtensa/kernel/syscalls/Makefile @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))'\ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 21/27] sparc: remove wrong comment from arch/sparc/include/asm/Kbuild
These are NOT exported to userspace. The headers listed in arch/sparc/include/uapi/asm/Kbuild are exported. Signed-off-by: Masahiro Yamada --- arch/sparc/include/asm/Kbuild | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 3688fdae50e4..aec20406145e 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -# User exported sparc header files - generated-y += syscall_table_32.h generated-y += syscall_table_64.h generated-y += syscall_table_c32.h -- 2.27.0
[PATCH 19/27] sh: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/sh/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile index 659faefdcb1d..1c42d2d2926d 100644 --- a/arch/sh/kernel/syscalls/Makefile +++ b/arch/sh/kernel/syscalls/Makefile @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))'\ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 13/27] microblaze: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/microblaze/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile index 659faefdcb1d..1c42d2d2926d 100644 --- a/arch/microblaze/kernel/syscalls/Makefile +++ b/arch/microblaze/kernel/syscalls/Makefile @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))'\ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 24/27] powerpc: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/powerpc/kernel/syscalls/Makefile | 19 ++- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile index 27b48954808d..d609f0040b2a 100644 --- a/arch/powerpc/kernel/syscalls/Makefile +++ b/arch/powerpc/kernel/syscalls/Makefile @@ -22,31 +22,31 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,nospu,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,nospu,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,nospu,32 systbl_abi_syscall_table_32 := 32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,nospu,64 systbl_abi_syscall_table_64 := 64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,nospu,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_spu := common,spu systbl_abi_syscall_table_spu := spu -$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) +$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h 
@@ -55,9 +55,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_c32.h \ syscall_table_spu.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 14/27] microblaze: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts microblaze to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/microblaze/kernel/syscall_table.S| 3 +- arch/microblaze/kernel/syscalls/Makefile | 7 ++-- arch/microblaze/kernel/syscalls/syscalltbl.sh | 32 --- 3 files changed, 3 insertions(+), 39 deletions(-) delete mode 100644 arch/microblaze/kernel/syscalls/syscalltbl.sh diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index ce006646f741..3bc60a2b159e 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry ENTRY(sys_call_table) #include -#undef __SYSCALL diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile index 1c42d2d2926d..6610130c67bc 100644 --- a/arch/microblaze/kernel/syscalls/Makefile +++ b/arch/microblaze/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh 
b/arch/microblaze/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad.. --- a/arch/microblaze/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" -- 2.27.0
[PATCH 11/27] m68k: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/m68k/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile index 659faefdcb1d..1c42d2d2926d 100644 --- a/arch/m68k/kernel/syscalls/Makefile +++ b/arch/m68k/kernel/syscalls/Makefile @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))'\ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 18/27] parisc: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts parisc to use scripts/syscalltbl.sh. This also unifies syscall_table_64.h and syscall_table_c32.h. Signed-off-by: Masahiro Yamada --- arch/parisc/include/asm/Kbuild| 1 - arch/parisc/kernel/syscall.S | 16 +- arch/parisc/kernel/syscalls/Makefile | 19 arch/parisc/kernel/syscalls/syscalltbl.sh | 36 --- 4 files changed, 12 insertions(+), 60 deletions(-) delete mode 100644 arch/parisc/kernel/syscalls/syscalltbl.sh diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 4406475a2304..e6e7f74c8ac9 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generic-y += kvm_para.h generic-y += mcs_spinlock.h generic-y += user.h diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 322503780db6..3f24a0af1e04 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -919,24 +919,24 @@ ENTRY(lws_table) END(lws_table) /* End of lws table */ +#ifdef CONFIG_64BIT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#else +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#endif #define __SYSCALL(nr, entry) ASM_ULONG_INSN entry .align 8 ENTRY(sys_call_table) .export sys_call_table,data -#ifdef CONFIG_64BIT -#include/* Compat syscalls */ -#else -#include /* 32-bit native syscalls */ -#endif +#include /* 32-bit syscalls */ END(sys_call_table) #ifdef CONFIG_64BIT .align 8 ENTRY(sys_call_table64) -#include /* 64-bit native syscalls */ +#include /* 64-bit syscalls */ END(sys_call_table64) #endif -#undef __SYSCALL /* All light-weight-syscall atomic operations @@ -961,5 +961,3 @@ END(lws_lock_start) 
.previous .end - - diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index 556fe30a6c8f..77fea5beb9be 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(abis) syshdr_abis_unistd_32 := common,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE @@ -29,23 +26,17 @@ syshdr_abis_unistd_64 := common,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: abis := common,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: abis := common,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 
f7393a7b18aa.. --- a/arch/parisc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n
[PATCH 12/27] m68k: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts m68k to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/m68k/kernel/syscalls/Makefile | 7 ++ arch/m68k/kernel/syscalls/syscalltbl.sh | 32 - arch/m68k/kernel/syscalltable.S | 3 +-- 3 files changed, 3 insertions(+), 39 deletions(-) delete mode 100644 arch/m68k/kernel/syscalls/syscalltbl.sh diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile index 1c42d2d2926d..6610130c67bc 100644 --- a/arch/m68k/kernel/syscalls/Makefile +++ b/arch/m68k/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad.. 
--- a/arch/m68k/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index d329cc7b481c..e25ef4a9df30 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -18,9 +18,8 @@ #define sys_mmap2 sys_mmap_pgoff #endif -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry .section .rodata ALIGN ENTRY(sys_call_table) #include -#undef __SYSCALL -- 2.27.0
[PATCH 17/27] parisc: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/parisc/kernel/syscalls/Makefile | 17 + 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index c22a21c39f30..556fe30a6c8f 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_abis_unistd_32 := common,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abis_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_abis_syscall_table_32 := common,32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_64 := common,64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) +$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) systbl_abis_syscall_table_c32 := common,32 systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h @@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ syscall_table_c32.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) 
$(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 10/27] ia64: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts ia64 to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/ia64/kernel/entry.S| 3 +-- arch/ia64/kernel/syscalls/Makefile | 8 ++- arch/ia64/kernel/syscalls/syscalltbl.sh | 32 - 3 files changed, 3 insertions(+), 40 deletions(-) delete mode 100644 arch/ia64/kernel/syscalls/syscalltbl.sh diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e98e3dafffd8..5eba3fb2e311 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1420,10 +1420,9 @@ END(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ -#define __SYSCALL(nr, entry, nargs) data8 entry +#define __SYSCALL(nr, entry) data8 entry .rodata .align 8 .globl sys_call_table sys_call_table: #include -#undef __SYSCALL diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile index b9bfd186295f..3f7e0bfae7e3 100644 --- a/arch/ia64/kernel/syscalls/Makefile +++ b/arch/ia64/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,16 +16,12 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ "" 1024 syshdr_offset_unistd_64 := __NR_Linux $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_offset_syscall_table := 1024 $(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) 
diff --git a/arch/ia64/kernel/syscalls/syscalltbl.sh b/arch/ia64/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad.. --- a/arch/ia64/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" -- 2.27.0
[PATCH 27/27] xtensa: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts xtensa to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/xtensa/kernel/syscall.c | 3 +-- arch/xtensa/kernel/syscalls/Makefile | 7 ++--- arch/xtensa/kernel/syscalls/syscalltbl.sh | 32 --- 3 files changed, 3 insertions(+), 39 deletions(-) delete mode 100644 arch/xtensa/kernel/syscalls/syscalltbl.sh diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 2c415fce6801..a453d17f0da8 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -31,9 +31,8 @@ syscall_t sys_call_table[__NR_syscalls] /* FIXME __cacheline_aligned */= { [0 ... __NR_syscalls - 1] = (syscall_t)&sys_ni_syscall, -#define __SYSCALL(nr, entry, nargs)[nr] = (syscall_t)entry, +#define __SYSCALL(nr, entry)[nr] = (syscall_t)entry, #include -#undef __SYSCALL }; #define COLOUR_ALIGN(addr, pgoff) \ diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile index 1c42d2d2926d..6610130c67bc 100644 --- a/arch/xtensa/kernel/syscalls/Makefile +++ b/arch/xtensa/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git 
a/arch/xtensa/kernel/syscalls/syscalltbl.sh b/arch/xtensa/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad.. --- a/arch/xtensa/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" -- 2.27.0
[PATCH 08/27] alpha: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts alpha to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/alpha/kernel/syscalls/Makefile | 7 ++ arch/alpha/kernel/syscalls/syscalltbl.sh | 32 arch/alpha/kernel/systbls.S | 3 +-- 3 files changed, 3 insertions(+), 39 deletions(-) delete mode 100644 arch/alpha/kernel/syscalls/syscalltbl.sh diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile index 1c42d2d2926d..6610130c67bc 100644 --- a/arch/alpha/kernel/syscalls/Makefile +++ b/arch/alpha/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/alpha/kernel/syscalls/syscalltbl.sh b/arch/alpha/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad.. 
--- a/arch/alpha/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 9704f22ed5e3..68f3e4f329eb 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -7,10 +7,9 @@ #include -#define __SYSCALL(nr, entry, nargs) .quad entry +#define __SYSCALL(nr, entry) .quad entry .data .align 3 .globl sys_call_table sys_call_table: #include -#undef __SYSCALL -- 2.27.0
[PATCH 23/27] sparc: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts sparc to use scripts/syscalltbl.sh. This also unifies syscall_table_64.h and syscall_table_c32.h. Signed-off-by: Masahiro Yamada --- arch/sparc/include/asm/Kbuild| 1 - arch/sparc/kernel/syscalls/Makefile | 19 - arch/sparc/kernel/syscalls/syscalltbl.sh | 36 arch/sparc/kernel/systbls_32.S | 4 +-- arch/sparc/kernel/systbls_64.S | 8 -- 5 files changed, 12 insertions(+), 56 deletions(-) delete mode 100644 arch/sparc/kernel/syscalls/syscalltbl.sh diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index aec20406145e..0b9d98ced34a 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generic-y += export.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile index 556fe30a6c8f..77fea5beb9be 100644 --- a/arch/sparc/kernel/syscalls/Makefile +++ b/arch/sparc/kernel/syscalls/Makefile @@ -7,7 +7,7 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscall := $(srctree)/$(src)/syscall.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -16,10 +16,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))'\ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(abis) syshdr_abis_unistd_32 := common,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) 
FORCE @@ -29,23 +26,17 @@ syshdr_abis_unistd_64 := common,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: abis := common,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: abis := common,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) diff --git a/arch/sparc/kernel/syscalls/syscalltbl.sh b/arch/sparc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 77cf0143ba19.. --- a/arch/sparc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_nis_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! 
-z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index ab9e4d57685a..3aaffa017706 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -9,10 +9,10 @@ * Copyright (C) 1995 Adrian M. Rodriguez (adr...@remus.rutgers.edu) */ -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, entry) .long entry .data .align 4 .globl sys_call_table sys_cal
[PATCH 09/27] ia64: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/ia64/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile index 813a58cba39c..b9bfd186295f 100644 --- a/arch/ia64/kernel/syscalls/Makefile +++ b/arch/ia64/kernel/syscalls/Makefile @@ -22,19 +22,20 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_offset_$(basetarget))' syshdr_offset_unistd_64 := __NR_Linux -$(uapi)/unistd_64.h: $(syscall) $(syshdr) +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) systbl_offset_syscall_table := 1024 -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_64.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 07/27] alpha: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/alpha/kernel/syscalls/Makefile | 11 ++- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile index 659faefdcb1d..1c42d2d2926d 100644 --- a/arch/alpha/kernel/syscalls/Makefile +++ b/arch/alpha/kernel/syscalls/Makefile @@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@ '$(systbl_abi_$(basetarget))'\ '$(systbl_offset_$(basetarget))' -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(kapi)/syscall_table.h: $(syscall) $(systbl) +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h kapisyshdr-y += syscall_table.h -targets+= $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: -- 2.27.0
[PATCH 03/27] x86/build: add missing FORCE and fix 'targets' to make if_changed work
The rules in this Makefile cannot detect the command line change because the prerequisite 'FORCE' is missing. Adding 'FORCE' will result in the headers being rebuilt every time because the 'targets' addition is also wrong; the file paths in 'targets' must be relative to the current Makefile. Fix all of them so the if_changed rules work correctly. Signed-off-by: Masahiro Yamada --- arch/x86/entry/syscalls/Makefile | 23 --- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index 6fb9b57ed5ba..e1c7ddb7546b 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -24,34 +24,34 @@ quiet_cmd_hypercalls = HYPERCALLS $@ cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^) syshdr_abi_unistd_32 := i386 -$(uapi)/unistd_32.h: $(syscall32) $(syshdr) +$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_32_ia32 := i386 syshdr_pfx_unistd_32_ia32 := ia32_ -$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) +$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_x32 := common,x32 syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT -$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) +$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall64) $(syshdr) +$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) syshdr_abi_unistd_64_x32 := x32 syshdr_pfx_unistd_64_x32 := x32_ -$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) +$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) -$(out)/syscalls_32.h: $(syscall32) $(systbl) +$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE $(call if_changed,systbl) -$(out)/syscalls_64.h: $(syscall64) $(systbl) +$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE $(call if_changed,systbl) -$(out)/xen-hypercalls.h: 
$(srctree)/scripts/xen-hypercalls.sh +$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE $(call if_changed,hypercalls) $(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h @@ -62,9 +62,10 @@ syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h syshdr-$(CONFIG_X86_64)+= syscalls_64.h syshdr-$(CONFIG_XEN) += xen-hypercalls.h -targets+= $(uapisyshdr-y) $(syshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +syshdr-y := $(addprefix $(out)/, $(syshdr-y)) +targets+= $(addprefix ../../../../, $(uapisyshdr-y) $(syshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(out)/,$(syshdr-y)) +all: $(uapisyshdr-y) $(syshdr-y) @: -- 2.27.0
[PATCH 02/27] x86/syscalls: fix -Wmissing-prototypes warnings from COND_SYSCALL()
Building kernel/sys_ni.c with W=1 emits tons of -Wmissing-prototypes warnings.
Signed-off-by: Masahiro Yamada --- arch/x86/include/asm/syscall_wrapper.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index a84333adeef2..80c08c7d5e72 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); } #define __COND_SYSCALL(abi, name) \ + __weak long __##abi##_##name(const struct pt_regs *__unused); \ __weak long __##abi##_##name(const struct pt_regs *__unused)\ { \ return sys_ni_syscall();\ -- 2.27.0
[PATCH 06/27] ARM: syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts ARM to use scripts/syscalltbl.sh. Signed-off-by: Masahiro Yamada --- arch/arm/kernel/entry-common.S | 8 arch/arm/tools/Makefile| 9 - arch/arm/tools/syscalltbl.sh | 22 -- 3 files changed, 8 insertions(+), 31 deletions(-) delete mode 100644 arch/arm/tools/syscalltbl.sh diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e0d7833a1827..7f0b7aba1498 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -344,20 +344,19 @@ ENTRY(\sym) .size \sym, . - \sym .endm -#define NATIVE(nr, func) syscall nr, func +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, func) syscall nr, func /* * This is the syscall table declaration for native ABI syscalls. * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. */ syscall_table_start sys_call_table -#define COMPAT(nr, native, compat) syscall nr, native #ifdef CONFIG_AEABI #include #else #include #endif -#undef COMPAT syscall_table_end sys_call_table /* @@ -455,7 +454,8 @@ ENDPROC(sys_oabi_readahead) * using the compatibility syscall entries. 
*/ syscall_table_start sys_oabi_call_table -#define COMPAT(nr, native, compat) syscall nr, compat +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #include syscall_table_end sys_oabi_call_table diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 27d8beb7c941..c331cfe92b3c 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -10,7 +10,7 @@ kapi := $(gen)/asm uapi := $(gen)/uapi/asm syshdr := $(srctree)/$(src)/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh syscall := $(srctree)/$(src)/syscall.tbl gen-y := $(gen)/calls-oabi.S @@ -47,8 +47,7 @@ quiet_cmd_syshdr = SYSHDR $@ '__NR_SYSCALL_BASE' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abi_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(abis) quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ @@ -70,10 +69,10 @@ sysnr_abi_unistd-nr := common,oabi,eabi,compat $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_calls-oabi := common,oabi +$(gen)/calls-oabi.S: abis := common,oabi $(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_calls-eabi := common,eabi +$(gen)/calls-eabi.S: abis := common,eabi $(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh deleted file mode 100644 index ae7e93cfbfd3.. 
--- a/arch/arm/tools/syscalltbl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( -while read nr abi name entry compat; do -if [ "$abi" = "eabi" -a -n "$compat" ]; then -echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2 -exit 1 -fi - - if [ -n "$entry" ]; then -if [ -z "$compat" ]; then -echo "NATIVE($nr, $entry)" -else -echo "COMPAT($nr, $entry, $compat)" -fi -fi -done -) > "$out" -- 2.27.0
[PATCH 05/27] x86/syscalls: switch to generic syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts in order to generate syscall table headers. My goal is to unify them into the single scripts/syscalltbl.sh. This commit converts x86 and UML to use scripts/syscalltbl.sh. Currently, syscall_64.h mixes up x86_64 and x32 syscalls. This commit separates syscall_64.h and syscall_x32.h. Signed-off-by: Masahiro Yamada --- arch/x86/entry/syscall_32.c | 12 +-- arch/x86/entry/syscall_64.c | 9 ++ arch/x86/entry/syscall_x32.c | 15 +++-- arch/x86/entry/syscalls/Makefile | 10 -- arch/x86/entry/syscalls/syscalltbl.sh | 46 --- arch/x86/include/asm/Kbuild | 1 + arch/x86/um/sys_call_table_32.c | 8 +++-- arch/x86/um/sys_call_table_64.c | 9 ++ 8 files changed, 34 insertions(+), 76 deletions(-) delete mode 100644 arch/x86/entry/syscalls/syscalltbl.sh diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index 86eb0d89d46f..70bf46e73b1c 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -8,12 +8,18 @@ #include #include -#define __SYSCALL_I386(nr, sym) extern long __ia32_##sym(const struct pt_regs *); +#ifdef CONFIG_IA32_EMULATION +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#else +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#endif + +#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *); #include -#undef __SYSCALL_I386 +#undef __SYSCALL -#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym, +#define __SYSCALL(nr, sym) [nr] = __ia32_##sym, __visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = { /* diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 1594ec72bcbb..82670bb10931 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -8,14 +8,11 @@ #include #include -#define __SYSCALL_X32(nr, sym) -#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym) - -#define __SYSCALL_64(nr, sym) extern long __x64_##sym(const struct pt_regs *); 
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *); #include -#undef __SYSCALL_64 +#undef __SYSCALL -#define __SYSCALL_64(nr, sym) [nr] = __x64_##sym, +#define __SYSCALL(nr, sym) [nr] = __x64_##sym, asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { /* diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index 3fea8fb9cd6a..6d2ef887d7b6 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -8,16 +8,11 @@ #include #include -#define __SYSCALL_64(nr, sym) +#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *); +#include +#undef __SYSCALL -#define __SYSCALL_X32(nr, sym) extern long __x64_##sym(const struct pt_regs *); -#define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *); -#include -#undef __SYSCALL_X32 -#undef __SYSCALL_COMMON - -#define __SYSCALL_X32(nr, sym) [nr] = __x64_##sym, -#define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym, +#define __SYSCALL(nr, sym) [nr] = __x64_##sym, asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = { /* @@ -25,5 +20,5 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = { * when the & below is removed. */ [0 ... 
__NR_x32_syscall_max] = &__x64_sys_ni_syscall, -#include +#include }; diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index e1c7ddb7546b..4409d148af1e 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -10,7 +10,7 @@ syscall32 := $(srctree)/$(src)/syscall_32.tbl syscall64 := $(srctree)/$(src)/syscall_64.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_pfx_$(basetarget))' \ '$(syshdr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(abis) quiet_cmd_hypercalls = HYPERCALLS $@ cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^) @@ -46,10 +46,15 @@ syshdr_pfx_unistd_64_x32 := x32_ $(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE $(call if_changed,syshdr) +$(out)/syscalls_32.h: abis := i386 $(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE $(call if_changed,systbl) +$(out)/syscalls_64.h: abis := common,64 $(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE $(call if_changed,systbl) +$(out)/syscalls_x32.h: abis := common,x32 +$(out)/syscalls_x32.h: $(syscall64) $(systbl) FORCE + $(call if_changed
[PATCH 04/27] x86/entry/x32: rename __x32_compat_sys_* to __x64_compat_sys_*
In arch/x86/entry/syscall_x32.c, the macros are mapped to symbols as follows: __SYSCALL_COMMON(nr, sym) --> __x64_ __SYSCALL_X32(nr, sym) --> __x32_ Originally, the syscalls in the x32 special range (512-547) were all compat. This assumption is now broken after the following commits: 55db9c0e8534 ("net: remove compat_sys_{get,set}sockopt") 5f764d624a89 ("fs: remove the compat readv/writev syscalls") 598b3cec831f ("fs: remove compat_sys_vmsplice") c3973b401ef2 ("mm: remove compat_process_vm_{readv,writev}") Those commits redefined __x32_sys_* to __x64_sys_* because there is no stub like __x32_sys_*. I think defining as follows is sensible and cleaner. __SYSCALL_COMMON(nr, sym) --> __x64_ __SYSCALL_X32(nr, sym) --> __x64_ The ugly #define __x32_sys_* will go away. Signed-off-by: Masahiro Yamada --- arch/x86/entry/syscall_x32.c | 16 ++-- arch/x86/include/asm/syscall_wrapper.h | 10 +- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index f2fe0a33bcfd..3fea8fb9cd6a 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -8,27 +8,15 @@ #include #include -/* - * Reuse the 64-bit entry points for the x32 versions that occupy different - * slots in the syscall table. 
- */ -#define __x32_sys_readv__x64_sys_readv -#define __x32_sys_writev __x64_sys_writev -#define __x32_sys_getsockopt __x64_sys_getsockopt -#define __x32_sys_setsockopt __x64_sys_setsockopt -#define __x32_sys_vmsplice __x64_sys_vmsplice -#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv -#define __x32_sys_process_vm_writev__x64_sys_process_vm_writev - #define __SYSCALL_64(nr, sym) -#define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *); +#define __SYSCALL_X32(nr, sym) extern long __x64_##sym(const struct pt_regs *); #define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *); #include #undef __SYSCALL_X32 #undef __SYSCALL_COMMON -#define __SYSCALL_X32(nr, sym) [nr] = __x32_##sym, +#define __SYSCALL_X32(nr, sym) [nr] = __x64_##sym, #define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym, asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = { diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index 80c08c7d5e72..6a2827d0681f 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -17,7 +17,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); * __x64_sys_*() - 64-bit native syscall * __ia32_sys_*()- 32-bit native syscall or common compat syscall * __ia32_compat_sys_*() - 32-bit compat syscall - * __x32_compat_sys_*() - 64-bit X32 compat syscall + * __x64_compat_sys_*() - 64-bit X32 compat syscall * * The registers are decoded according to the ABI: * 64-bit: RDI, RSI, RDX, R10, R8, R9 @@ -166,17 +166,17 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); * with x86_64 obviously do not need such care. */ #define __X32_COMPAT_SYS_STUB0(name) \ - __SYS_STUB0(x32, compat_sys_##name) + __SYS_STUB0(x64, compat_sys_##name) #define __X32_COMPAT_SYS_STUBx(x, name, ...) 
\ - __SYS_STUBx(x32, compat_sys##name, \ + __SYS_STUBx(x64, compat_sys##name, \ SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__)) #define __X32_COMPAT_COND_SYSCALL(name) \ - __COND_SYSCALL(x32, compat_sys_##name) + __COND_SYSCALL(x64, compat_sys_##name) #define __X32_COMPAT_SYS_NI(name) \ - __SYS_NI(x32, compat_sys_##name) + __SYS_NI(x64, compat_sys_##name) #else /* CONFIG_X86_X32 */ #define __X32_COMPAT_SYS_STUB0(name) #define __X32_COMPAT_SYS_STUBx(x, name, ...) -- 2.27.0
[PATCH 00/27] arch: syscalls: unify all syscalltbl.sh into scripts/syscalltbl.sh
As of v5.11-rc1, 12 architectures duplicate similar shell scripts: $ find arch -name syscalltbl.sh | sort arch/alpha/kernel/syscalls/syscalltbl.sh arch/arm/tools/syscalltbl.sh arch/ia64/kernel/syscalls/syscalltbl.sh arch/m68k/kernel/syscalls/syscalltbl.sh arch/microblaze/kernel/syscalls/syscalltbl.sh arch/mips/kernel/syscalls/syscalltbl.sh arch/parisc/kernel/syscalls/syscalltbl.sh arch/powerpc/kernel/syscalls/syscalltbl.sh arch/sh/kernel/syscalls/syscalltbl.sh arch/sparc/kernel/syscalls/syscalltbl.sh arch/x86/entry/syscalls/syscalltbl.sh arch/xtensa/kernel/syscalls/syscalltbl.sh This patch set unifies all of them into a single file, scripts/syscalltbl.sh. The code-diff is attractive: 51 files changed, 254 insertions(+), 674 deletions(-) delete mode 100644 arch/alpha/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/arm/tools/syscalltbl.sh delete mode 100644 arch/ia64/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/m68k/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/microblaze/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/mips/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/parisc/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/powerpc/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/sh/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/sparc/kernel/syscalls/syscalltbl.sh delete mode 100644 arch/x86/entry/syscalls/syscalltbl.sh delete mode 100644 arch/xtensa/kernel/syscalls/syscalltbl.sh create mode 100644 scripts/syscalltbl.sh Also, this includes Makefile fixes, and some x86 fixes and cleanups. My question is, how to merge this series. I am touching all architectures, but the first patch is a prerequisite of the rest of this series. One possibility is to ask the x86 maintainers to pickup the first 5 patches for v5.12-rc1, and then send the rest for v5.13-rc1, splitting per-arch. I want the x86 maintainers to check the first 5 patches because I cleaned up the x32 code. 
I know x32 was considered for deprecation, but my motivation is to clean-up scripts across the tree without changing the functionality. Masahiro Yamada (27): scripts: add generic syscalltbl.sh x86/syscalls: fix -Wmissing-prototypes warnings from COND_SYSCALL() x86/build: add missing FORCE and fix 'targets' to make if_changed work x86/entry/x32: rename __x32_compat_sys_* to __x64_compat_sys_* x86/syscalls: switch to generic syscalltbl.sh ARM: syscalls: switch to generic syscalltbl.sh alpha: add missing FORCE and fix 'targets' to make if_changed work alpha: syscalls: switch to generic syscalltbl.sh ia64: add missing FORCE and fix 'targets' to make if_changed work ia64: syscalls: switch to generic syscalltbl.sh m68k: add missing FORCE and fix 'targets' to make if_changed work m68k: syscalls: switch to generic syscalltbl.sh microblaze: add missing FORCE and fix 'targets' to make if_changed work microblaze: syscalls: switch to generic syscalltbl.sh mips: add missing FORCE and fix 'targets' to make if_changed work mips: syscalls: switch to generic syscalltbl.sh parisc: add missing FORCE and fix 'targets' to make if_changed work parisc: syscalls: switch to generic syscalltbl.sh sh: add missing FORCE and fix 'targets' to make if_changed work sh: syscalls: switch to generic syscalltbl.sh sparc: remove wrong comment from arch/sparc/include/asm/Kbuild sparc: add missing FORCE and fix 'targets' to make if_changed work sparc: syscalls: switch to generic syscalltbl.sh powerpc: add missing FORCE and fix 'targets' to make if_changed work powerpc: syscalls: switch to generic syscalltbl.sh xtensa: add missing FORCE and fix 'targets' to make if_changed work xtensa: syscalls: switch to generic syscalltbl.sh arch/alpha/kernel/syscalls/Makefile | 18 +++ arch/alpha/kernel/syscalls/syscalltbl.sh | 32 --- arch/alpha/kernel/systbls.S | 3 +- arch/arm/kernel/entry-common.S| 8 +-- arch/arm/tools/Makefile | 9 ++-- arch/arm/tools/syscalltbl.sh | 22 arch/ia64/kernel/entry.S | 3 +- 
arch/ia64/kernel/syscalls/Makefile| 19 +++ arch/ia64/kernel/syscalls/syscalltbl.sh | 32 --- arch/m68k/kernel/syscalls/Makefile| 18 +++ arch/m68k/kernel/syscalls/syscalltbl.sh | 32 --- arch/m68k/kernel/syscalltable.S | 3 +- arch/microblaze/kernel/syscall_table.S| 3 +- arch/microblaze/kernel/syscalls/Makefile | 18 +++ arch/microblaze/kernel/syscalls/syscalltbl.sh | 32 --- arch/mips/include/asm/Kbuild | 7 ++- arch/mips/kernel/scall32-o32.S| 4 +- arch/mips/kernel/scall64-n32.S| 3 +- arch/mips/kernel/scall64-n64.S| 3 +- arch/mips/kernel/scall64-o32.S| 4 +- arch/mips/kernel/syscalls/Makefile| 53
[PATCH 01/27] scripts: add generic syscalltbl.sh
Most architectures generate syscall headers at compile time in almost the same way.
Signed-off-by: Masahiro Yamada --- scripts/syscalltbl.sh | 52 +++ 1 file changed, 52 insertions(+) create mode 100644 scripts/syscalltbl.sh diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh new file mode 100644 index ..15bf4e09f88c --- /dev/null +++ b/scripts/syscalltbl.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only +# +# Usage: +# scripts/syscalltbl.sh INFILE OUTFILE [ABIS] [OFFSET] +# +# INFILE: input syscall table +# OUTFILE: output file +# ABIS (optional): specify the ABIs to handle. +# If omitted, all lines are handled. +# OFFSET (optinal): spefify the offset of the syscall numbers. +# If omitted, the offset is zero. +# +# The syscall table format: +# nr abi name native [compat] +# +# nr: syscall number +# abi: ABI name +# name: syscall name +# native: native entry point +# compat (optional): compat entry point + +set -e + +in="$1" +out="$2" +abis=$(echo "($3)" | tr ',' '|') +offset="${4:-0}" + +nxt=$offset + +grep -E "^[0-9]+[[:space:]]+${abis}" "$in" | sort -n | { + + while read nr abi name native compat ; do + + nr=$((nr + $offset)) + + while [ $nxt -lt $nr ]; do + echo "__SYSCALL($nxt, sys_ni_syscall)" + nxt=$((nxt + 1)) + done + + if [ -n "$compat" ]; then + echo "__SYSCALL_WITH_COMPAT($nr, $native, $compat)" + elif [ -n "$native" ]; then + echo "__SYSCALL($nr, $native)" + else + echo "__SYSCALL($nr, sys_ni_syscall)" + fi + nxt=$((nr + 1)) + done +} > "$out" -- 2.27.0
Re: [PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
Excerpts from Jens Axboe's message of January 28, 2021 5:29 am: > On 1/27/21 9:38 AM, Christophe Leroy wrote: >> >> >> Le 27/01/2021 à 15:56, Zorro Lang a écrit : >>> On powerpc, io_uring test hit below KUAP fault on __do_page_fault. >>> The fail source line is: >>> >>>if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, >>> is_write))) >>>return SIGSEGV; >>> >>> The is_user() is based on user_mod(regs) only. This's not suit for >>> io_uring, where the helper thread can assume the user app identity >>> and could perform this fault just fine. So turn to use mm to decide >>> if this is valid or not. >> >> I don't understand why testing is_user would be an issue. KUAP purpose >> it to block any unallowed access from kernel to user memory >> (Equivalent to SMAP on x86). So it really must be based on MSR_PR bit, >> that is what is_user provides. >> >> If the kernel access is legitimate, kernel should have opened >> userspace access then you shouldn't get this "Bug: Read fault blocked >> by KUAP!". >> >> As far as I understand, the fault occurs in >> iov_iter_fault_in_readable() which calls fault_in_pages_readable() And >> fault_in_pages_readable() uses __get_user() so it is a legitimate >> access and you really should get a KUAP fault. >> >> So the problem is somewhere else, I think you proposed patch just >> hides the problem, it doesn't fix it. > > If we do kthread_use_mm(), can we agree that the user access is valid? Yeah the io uring code is fine, provided it uses the uaccess primitives like any other kernel code. It's looking more like a an arch/powerpc bug. > We should be able to copy to/from user space, and including faults, if > that's been done and the new mm assigned. Because it really should be. > If SMAP was a problem on x86, we would have seen it long ago. > > I'm assuming this may be breakage related to the recent uaccess changes > related to set_fs and friends? Or maybe recent changes on the powerpc > side? > > Zorro, did 5.10 work? 
Would be interesting to know. Thanks, Nick
Re: [PATCH v3 28/32] powerpc/64s: interrupt implement exit logic in C
Excerpts from Christophe Leroy's message of January 27, 2021 6:54 pm: > > > Le 25/02/2020 à 18:35, Nicholas Piggin a écrit : >> Implement the bulk of interrupt return logic in C. The asm return code >> must handle a few cases: restoring full GPRs, and emulating stack store. >> >> The stack store emulation is significantly simplified, rather than creating >> a new return frame and switching to that before performing the store, it >> uses the PACA to keep a scratch register around to perform the store. >> >> The asm return code is moved into 64e for now. The new logic has made >> allowance for 64e, but I don't have a full environment that works well >> to test it, and even booting in emulated qemu is not great for stress >> testing. 64e shouldn't be too far off working with this, given a bit >> more testing and auditing of the logic. >> >> This is slightly faster on a POWER9 (page fault speed increases about >> 1.1%), probably due to reduced mtmsrd. >> > > How do you measure 'page fault' speed ? mmap 1000 pages, store to each one, mprotect(PROT_READ) then mprotect(PROT_READ|PROT_WRITE), then store a byte to each page and measure the cost. Something like that IIRC. Thanks, Nick
Re: [PATCH v6 05/39] powerpc: remove arguments from fault handler functions
Excerpts from Christophe Leroy's message of January 27, 2021 4:38 pm: > > > Le 15/01/2021 à 17:49, Nicholas Piggin a écrit : >> Make mm fault handlers all just take the pt_regs * argument and load >> DAR/DSISR from that. Make those that return a value return long. >> >> This is done to make the function signatures match other handlers, which >> will help with a future patch to add wrappers. Explicit arguments could >> be added for performance but that would require more wrapper macro >> variants. >> >> Signed-off-by: Nicholas Piggin >> --- >> arch/powerpc/include/asm/asm-prototypes.h | 4 ++-- >> arch/powerpc/include/asm/book3s/64/mmu-hash.h | 2 +- >> arch/powerpc/include/asm/bug.h| 2 +- >> arch/powerpc/kernel/entry_32.S| 7 +-- >> arch/powerpc/kernel/exceptions-64e.S | 2 -- >> arch/powerpc/kernel/exceptions-64s.S | 17 - >> arch/powerpc/kernel/head_40x.S| 10 +- >> arch/powerpc/kernel/head_8xx.S| 6 +++--- >> arch/powerpc/kernel/head_book3s_32.S | 5 ++--- >> arch/powerpc/kernel/head_booke.h | 4 +--- >> arch/powerpc/mm/book3s64/hash_utils.c | 8 +--- >> arch/powerpc/mm/book3s64/slb.c| 11 +++ >> arch/powerpc/mm/fault.c | 5 ++--- >> 13 files changed, 34 insertions(+), 49 deletions(-) >> > >> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S >> index 238eacfda7b0..d6ea3f2d6cc0 100644 >> --- a/arch/powerpc/kernel/entry_32.S >> +++ b/arch/powerpc/kernel/entry_32.S >> @@ -276,8 +276,7 @@ reenable_mmu: >> * We save a bunch of GPRs, >> * r3 can be different from GPR3(r1) at this point, r9 and r11 >> * contains the old MSR and handler address respectively, >> - * r4 & r5 can contain page fault arguments that need to be passed >> - * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left >> + * r0, r4-r8, r12, CCR, CTR, XER etc... are left >> * clobbered as they aren't useful past this point. 
>> */ >> >> @@ -285,15 +284,11 @@ reenable_mmu: >> stw r9,8(r1) >> stw r11,12(r1) >> stw r3,16(r1) > > As all functions only take 'regs' as input parameter, maybe we can avoid > saving 'r3' by > recalculating it from r1 after the call with 'addi > r3,r1,STACK_FRAME_OVERHEAD' ? It seems like it. All functions have regs as first parameter already don't they? So this change could be done before this patch as well. > >> -stw r4,20(r1) >> -stw r5,24(r1) > > Patch 6 needs to go before this change. Probably the easiest would be to > apply patch 6 before patch > 5. Or this change needs to go after. Hmm okay thanks for finding that. Thanks, Nick
RE: [PATCH v4 11/23] powerpc/syscall: Rename syscall_64.c into syscall.c
Excerpts from David Laight's message of January 26, 2021 8:28 pm: > From: Nicholas Piggin >> Sent: 26 January 2021 10:21 >> >> Excerpts from Christophe Leroy's message of January 26, 2021 12:48 am: >> > syscall_64.c will be reused almost as is for PPC32. >> > >> > Rename it syscall.c >> >> Could you rename it to interrupt.c instead? A system call is an >> interrupt, and the file now also has code to return from other >> interrupts as well, and it matches the new asm/interrupt.h from >> the interrupts series. > > Hmmm > > That might make it harder for someone looking for the system call > entry code to find it. It's very grep'able. > In some sense interrupts are the simpler case. > > Especially when comparing with other architectures which have > special instructions for syscall entry. powerpc does have a special instruction for syscall, and it causes a system call interrupt. I'm not sure about other architectures, but for powerpc its more sensible to call it interrupt.c than syscall.c. Thanks, Nick
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On Wed, Jan 27, 2021 at 01:31:02PM -0500, Mimi Zohar wrote: > On Wed, 2021-01-27 at 10:24 -0800, Lakshmi Ramasubramanian wrote: > > On 1/27/21 10:02 AM, Will Deacon wrote: > > > On Wed, Jan 27, 2021 at 09:56:53AM -0800, Lakshmi Ramasubramanian wrote: > > >> On 1/27/21 8:54 AM, Will Deacon wrote: > > >>> On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: > > Address and size of the buffer containing the IMA measurement log need > > to be passed from the current kernel to the next kernel on kexec. > > > > Add address and size fields to "struct kimage_arch" for ARM64 platform > > to hold the address and size of the IMA measurement log buffer. > > > > Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA > > is enabled, to indicate that the IMA measurement log information is > > present in the device tree for ARM64. > > > > Co-developed-by: Prakhar Srivastava > > Signed-off-by: Prakhar Srivastava > > Signed-off-by: Lakshmi Ramasubramanian > > Reviewed-by: Thiago Jung Bauermann > > --- > > arch/arm64/Kconfig | 1 + > > arch/arm64/include/asm/kexec.h | 5 + > > 2 files changed, 6 insertions(+) > > > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > > index 1d466addb078..ea7f7fe3dccd 100644 > > --- a/arch/arm64/Kconfig > > +++ b/arch/arm64/Kconfig > > @@ -1094,6 +1094,7 @@ config KEXEC > > config KEXEC_FILE > > bool "kexec file based system call" > > select KEXEC_CORE > > + select HAVE_IMA_KEXEC if IMA > > help > > This is new version of kexec system call. 
This system call is > > file based and takes file descriptors as system call argument > > diff --git a/arch/arm64/include/asm/kexec.h > > b/arch/arm64/include/asm/kexec.h > > index d24b527e8c00..2bd19ccb6c43 100644 > > --- a/arch/arm64/include/asm/kexec.h > > +++ b/arch/arm64/include/asm/kexec.h > > @@ -100,6 +100,11 @@ struct kimage_arch { > > void *elf_headers; > > unsigned long elf_headers_mem; > > unsigned long elf_headers_sz; > > + > > +#ifdef CONFIG_IMA_KEXEC > > + phys_addr_t ima_buffer_addr; > > + size_t ima_buffer_size; > > +#endif > > >>> > > >>> Why do these need to be in the arch structure instead of 'struct > > >>> kimage'? > > >>> > > >> > > >> Currently, only powerpc and, with this patch set, arm64 have support for > > >> carrying forward IMA measurement list across kexec system call. The above > > >> fields are used for tracking IMA measurement list. > > >> > > >> Do you see a reason to move these fields to "struct kimage"? > > > > > > If they're gated on CONFIG_IMA_KEXEC, then it seems harmless for them to > > > be added to the shared structure. Or are you saying that there are > > > architectures which have CONFIG_IMA_KEXEC but do not want these fields? > > > > > > > As far as I know, there are no other architectures that define > > CONFIG_IMA_KEXEC, but do not use these fields. > > Yes, CONFIG_IMA_KEXEC enables "carrying the IMA measurement list across > a soft boot". The only arch that currently carries the IMA > measurement across kexec is powerpc. Ok, in which case this sounds like it should be in the shared structure, no? Will
[PATCH] vio: make remove callback return void
The driver core ignores the return value of struct bus_type::remove() because there is only little that can be done. To simplify the quest to make this function return void, let struct vio_driver::remove() return void, too. All users already unconditionally return 0, this commit makes it obvious that returning an error code is a bad idea and makes it obvious for future driver authors that returning an error code isn't intended. Note there are two nominally different implementations for a vio bus: one in arch/sparc/kernel/vio.c and the other in arch/powerpc/platforms/pseries/vio.c. I didn't care to check which driver is using which of these busses (or if even some of them can be used with both) and simply adapt all drivers and the two bus codes in one go. Note that for the powerpc implementation there is a semantical change: Before this patch for a device that was bound to a driver without a remove callback vio_cmo_bus_remove(viodev) wasn't called. As the device core still considers the device unbound after vio_bus_remove() returns calling this unconditionally is the consistent behaviour which is implemented here. Signed-off-by: Uwe Kleine-König --- Hello, note that this change depends on https://lore.kernel.org/r/20210121062005.53271-1-...@linux.ibm.com which removes an (ignored) return -EBUSY in drivers/net/ethernet/ibm/ibmvnic.c. I don't know when/if this latter patch will be applied, so it might take some time until my patch can go in. 
Best regards Uwe arch/powerpc/include/asm/vio.h | 2 +- arch/powerpc/platforms/pseries/vio.c | 7 +++ arch/sparc/include/asm/vio.h | 2 +- arch/sparc/kernel/ds.c | 6 -- arch/sparc/kernel/vio.c | 4 ++-- drivers/block/sunvdc.c | 3 +-- drivers/char/hw_random/pseries-rng.c | 3 +-- drivers/char/tpm/tpm_ibmvtpm.c | 4 +--- drivers/crypto/nx/nx-842-pseries.c | 4 +--- drivers/crypto/nx/nx.c | 4 +--- drivers/misc/ibmvmc.c| 4 +--- drivers/net/ethernet/ibm/ibmveth.c | 4 +--- drivers/net/ethernet/ibm/ibmvnic.c | 4 +--- drivers/net/ethernet/sun/ldmvsw.c| 4 +--- drivers/net/ethernet/sun/sunvnet.c | 3 +-- drivers/scsi/ibmvscsi/ibmvfc.c | 3 +-- drivers/scsi/ibmvscsi/ibmvscsi.c | 4 +--- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4 +--- drivers/tty/hvc/hvcs.c | 3 +-- drivers/tty/vcc.c| 4 +--- 20 files changed, 22 insertions(+), 54 deletions(-) diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 0cf52746531b..721c0d6715ac 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -113,7 +113,7 @@ struct vio_driver { const char *name; const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); - int (*remove)(struct vio_dev *dev); + void (*remove)(struct vio_dev *dev); /* A driver must have a get_desired_dma() function to * be loaded in a CMO environment if it uses DMA. 
*/ diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index b2797cfe4e2b..9cb4fc839fd5 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1261,7 +1261,6 @@ static int vio_bus_remove(struct device *dev) struct vio_dev *viodev = to_vio_dev(dev); struct vio_driver *viodrv = to_vio_driver(dev->driver); struct device *devptr; - int ret = 1; /* * Hold a reference to the device after the remove function is called @@ -1270,13 +1269,13 @@ static int vio_bus_remove(struct device *dev) devptr = get_device(dev); if (viodrv->remove) - ret = viodrv->remove(viodev); + viodrv->remove(viodev); - if (!ret && firmware_has_feature(FW_FEATURE_CMO)) + if (firmware_has_feature(FW_FEATURE_CMO)) vio_cmo_bus_remove(viodev); put_device(devptr); - return ret; + return 0; } /** diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index 059f0eb678e0..8a1a83bbb6d5 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -362,7 +362,7 @@ struct vio_driver { struct list_headnode; const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); - int (*remove)(struct vio_dev *dev); + void (*remove)(struct vio_dev *dev); void (*shutdown)(struct vio_dev *dev); unsigned long driver_data; struct device_driverdriver; diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 522e5b51050c..4a5bdb0df779 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -1236,11 +1236,6 @@ static int ds_probe(struct vio_dev *
Re: [PATCH 1/2] PCI/AER: Disable AER interrupt during suspend
On Thu, Jan 28, 2021 at 01:31:00AM +0800, Kai-Heng Feng wrote: > Commit 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in > hint") enables ACS, and some platforms lose its NVMe after resume from > firmware: > [ 50.947816] pcieport :00:1b.0: DPC: containment event, status:0x1f01 > source:0x > [ 50.947817] pcieport :00:1b.0: DPC: unmasked uncorrectable error > detected > [ 50.947829] pcieport :00:1b.0: PCIe Bus Error: severity=Uncorrected > (Non-Fatal), type=Transaction Layer, (Receiver ID) > [ 50.947830] pcieport :00:1b.0: device [8086:06ac] error > status/mask=0020/0001 > [ 50.947831] pcieport :00:1b.0:[21] ACSViol(First) > [ 50.947841] pcieport :00:1b.0: AER: broadcast error_detected message > [ 50.947843] nvme nvme0: frozen state error detected, reset controller > > It happens right after ACS gets enabled during resume. > > To prevent that from happening, disable AER interrupt and enable it on > system suspend and resume, respectively. Lots of questions here. Maybe this is what we'll end up doing, but I am curious about why the error is reported in the first place. Is this a consequence of the link going down and back up? Is it consequence of the device doing a DMA when it shouldn't? Are we doing something in the wrong order during suspend? Or maybe resume, since I assume the error is reported during resume? If we *do* take the error, why doesn't DPC recovery work? 
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=209149 > Fixes: 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") > Signed-off-by: Kai-Heng Feng > --- > drivers/pci/pcie/aer.c | 18 ++ > 1 file changed, 18 insertions(+) > > diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c > index 77b0f2c45bc0..0e9a85530ae6 100644 > --- a/drivers/pci/pcie/aer.c > +++ b/drivers/pci/pcie/aer.c > @@ -1365,6 +1365,22 @@ static int aer_probe(struct pcie_device *dev) > return 0; > } > > +static int aer_suspend(struct pcie_device *dev) > +{ > + struct aer_rpc *rpc = get_service_data(dev); > + > + aer_disable_rootport(rpc); > + return 0; > +} > + > +static int aer_resume(struct pcie_device *dev) > +{ > + struct aer_rpc *rpc = get_service_data(dev); > + > + aer_enable_rootport(rpc); > + return 0; > +} > + > /** > * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP > * @dev: pointer to Root Port, RCEC, or RCiEP > @@ -1437,6 +1453,8 @@ static struct pcie_port_service_driver aerdriver = { > .service= PCIE_PORT_SERVICE_AER, > > .probe = aer_probe, > + .suspend= aer_suspend, > + .resume = aer_resume, > .remove = aer_remove, > }; > > -- > 2.29.2 >
[PATCH 2/2] PCI/DPC: Disable DPC interrupt during suspend
Commit 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") enables ACS, and some platforms lose its NVMe after resume from firmware: [ 50.947816] pcieport :00:1b.0: DPC: containment event, status:0x1f01 source:0x [ 50.947817] pcieport :00:1b.0: DPC: unmasked uncorrectable error detected [ 50.947829] pcieport :00:1b.0: PCIe Bus Error: severity=Uncorrected (Non-Fatal), type=Transaction Layer, (Receiver ID) [ 50.947830] pcieport :00:1b.0: device [8086:06ac] error status/mask=0020/0001 [ 50.947831] pcieport :00:1b.0:[21] ACSViol(First) [ 50.947841] pcieport :00:1b.0: AER: broadcast error_detected message [ 50.947843] nvme nvme0: frozen state error detected, reset controller Like what previous patch does to AER, introduce new helpers to disable DPC interrupt and enable it on system suspend and resume, respectively. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=209149 Fixes: 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") Signed-off-by: Kai-Heng Feng --- drivers/pci/pcie/dpc.c | 49 -- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index e05aba86a317..d12289cb5d44 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -279,6 +279,28 @@ void pci_dpc_init(struct pci_dev *pdev) } } +static void dpc_enable(struct pcie_device *dev) +{ + struct pci_dev *pdev = dev->port; + u16 cap, ctl; + + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap); + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); + + ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); +} + +static void dpc_disable(struct pcie_device *dev) +{ + struct pci_dev *pdev = dev->port; + u16 ctl; + + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); + ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN); + pci_write_config_word(pdev, pdev->dpc_cap + 
PCI_EXP_DPC_CTL, ctl); +} + #define FLAG(x, y) (((x) & (y)) ? '+' : '-') static int dpc_probe(struct pcie_device *dev) { @@ -299,11 +321,7 @@ static int dpc_probe(struct pcie_device *dev) return status; } - pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap); - pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); - - ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; - pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); + dpc_enable(dev); pci_info(pdev, "enabled with IRQ %d\n", dev->irq); pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", @@ -316,14 +334,21 @@ static int dpc_probe(struct pcie_device *dev) return status; } -static void dpc_remove(struct pcie_device *dev) +static int dpc_suspend(struct pcie_device *dev) { - struct pci_dev *pdev = dev->port; - u16 ctl; + dpc_disable(dev); + return 0; +} - pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); - ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN); - pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); +static int dpc_resume(struct pcie_device *dev) +{ + dpc_enable(dev); + return 0; +} + +static void dpc_remove(struct pcie_device *dev) +{ + dpc_disable(dev); } static struct pcie_port_service_driver dpcdriver = { @@ -331,6 +356,8 @@ static struct pcie_port_service_driver dpcdriver = { .port_type = PCIE_ANY_PORT, .service= PCIE_PORT_SERVICE_DPC, .probe = dpc_probe, + .suspend= dpc_suspend, + .resume = dpc_resume, .remove = dpc_remove, }; -- 2.29.2
[PATCH 1/2] PCI/AER: Disable AER interrupt during suspend
Commit 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") enables ACS, and some platforms lose its NVMe after resume from firmware: [ 50.947816] pcieport :00:1b.0: DPC: containment event, status:0x1f01 source:0x [ 50.947817] pcieport :00:1b.0: DPC: unmasked uncorrectable error detected [ 50.947829] pcieport :00:1b.0: PCIe Bus Error: severity=Uncorrected (Non-Fatal), type=Transaction Layer, (Receiver ID) [ 50.947830] pcieport :00:1b.0: device [8086:06ac] error status/mask=0020/0001 [ 50.947831] pcieport :00:1b.0:[21] ACSViol(First) [ 50.947841] pcieport :00:1b.0: AER: broadcast error_detected message [ 50.947843] nvme nvme0: frozen state error detected, reset controller It happens right after ACS gets enabled during resume. To prevent that from happening, disable AER interrupt and enable it on system suspend and resume, respectively. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=209149 Fixes: 50310600ebda ("iommu/vt-d: Enable PCI ACS for platform opt in hint") Signed-off-by: Kai-Heng Feng --- drivers/pci/pcie/aer.c | 18 ++ 1 file changed, 18 insertions(+) diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 77b0f2c45bc0..0e9a85530ae6 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -1365,6 +1365,22 @@ static int aer_probe(struct pcie_device *dev) return 0; } +static int aer_suspend(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_disable_rootport(rpc); + return 0; +} + +static int aer_resume(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_enable_rootport(rpc); + return 0; +} + /** * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP * @dev: pointer to Root Port, RCEC, or RCiEP @@ -1437,6 +1453,8 @@ static struct pcie_port_service_driver aerdriver = { .service= PCIE_PORT_SERVICE_AER, .probe = aer_probe, + .suspend= aer_suspend, + .resume = aer_resume, .remove = aer_remove, }; -- 2.29.2
Re: [PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
On 1/27/21 9:38 AM, Christophe Leroy wrote: > > > Le 27/01/2021 à 15:56, Zorro Lang a écrit : >> On powerpc, io_uring test hit below KUAP fault on __do_page_fault. >> The fail source line is: >> >>if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, >> is_write))) >>return SIGSEGV; >> >> The is_user() is based on user_mod(regs) only. This's not suit for >> io_uring, where the helper thread can assume the user app identity >> and could perform this fault just fine. So turn to use mm to decide >> if this is valid or not. > > I don't understand why testing is_user would be an issue. KUAP purpose > it to block any unallowed access from kernel to user memory > (Equivalent to SMAP on x86). So it really must be based on MSR_PR bit, > that is what is_user provides. > > If the kernel access is legitimate, kernel should have opened > userspace access then you shouldn't get this "Bug: Read fault blocked > by KUAP!". > > As far as I understand, the fault occurs in > iov_iter_fault_in_readable() which calls fault_in_pages_readable() And > fault_in_pages_readable() uses __get_user() so it is a legitimate > access and you really should get a KUAP fault. > > So the problem is somewhere else, I think you proposed patch just > hides the problem, it doesn't fix it. If we do kthread_use_mm(), can we agree that the user access is valid? We should be able to copy to/from user space, and including faults, if that's been done and the new mm assigned. Because it really should be. If SMAP was a problem on x86, we would have seen it long ago. I'm assuming this may be breakage related to the recent uaccess changes related to set_fs and friends? Or maybe recent changes on the powerpc side? Zorro, did 5.10 work? -- Jens Axboe
Re: [PATCH v15 09/10] arm64: Call kmalloc() to allocate DTB buffer
On Wed, Jan 27, 2021 at 09:59:38AM -0800, Lakshmi Ramasubramanian wrote: > On 1/27/21 8:52 AM, Will Deacon wrote: > > Hi Will, > > > On Fri, Jan 15, 2021 at 09:30:16AM -0800, Lakshmi Ramasubramanian wrote: > > > create_dtb() function allocates kernel virtual memory for > > > the device tree blob (DTB). This is not consistent with other > > > architectures, such as powerpc, which calls kmalloc() for allocating > > > memory for the DTB. > > > > > > Call kmalloc() to allocate memory for the DTB, and kfree() to free > > > the allocated memory. > > > > > > Co-developed-by: Prakhar Srivastava > > > Signed-off-by: Prakhar Srivastava > > > Signed-off-by: Lakshmi Ramasubramanian > > > --- > > > arch/arm64/kernel/machine_kexec_file.c | 12 +++- > > > 1 file changed, 7 insertions(+), 5 deletions(-) > > > > > > diff --git a/arch/arm64/kernel/machine_kexec_file.c > > > b/arch/arm64/kernel/machine_kexec_file.c > > > index 7de9c47dee7c..51c40143d6fa 100644 > > > --- a/arch/arm64/kernel/machine_kexec_file.c > > > +++ b/arch/arm64/kernel/machine_kexec_file.c > > > @@ -29,7 +29,7 @@ const struct kexec_file_ops * const > > > kexec_file_loaders[] = { > > > int arch_kimage_file_post_load_cleanup(struct kimage *image) > > > { > > > - vfree(image->arch.dtb); > > > + kfree(image->arch.dtb); > > > image->arch.dtb = NULL; > > > vfree(image->arch.elf_headers); > > > @@ -59,19 +59,21 @@ static int create_dtb(struct kimage *image, > > > + cmdline_len + DTB_EXTRA_SPACE; > > > for (;;) { > > > - buf = vmalloc(buf_size); > > > + buf = kmalloc(buf_size, GFP_KERNEL); > > > > Is there a functional need for this patch? I build the 'dtbs' target just > > now and sdm845-db845c.dtb is approaching 100K, which feels quite large > > for kmalloc(). > > Changing the allocation from vmalloc() to kmalloc() would help us further > consolidate the DTB setup code for powerpc and arm64. Ok, but at the risk of allocation failure. Can powerpc use vmalloc() instead? Will
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On Wed, 2021-01-27 at 10:24 -0800, Lakshmi Ramasubramanian wrote: > On 1/27/21 10:02 AM, Will Deacon wrote: > > On Wed, Jan 27, 2021 at 09:56:53AM -0800, Lakshmi Ramasubramanian wrote: > >> On 1/27/21 8:54 AM, Will Deacon wrote: > >>> On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: > Address and size of the buffer containing the IMA measurement log need > to be passed from the current kernel to the next kernel on kexec. > > Add address and size fields to "struct kimage_arch" for ARM64 platform > to hold the address and size of the IMA measurement log buffer. > > Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA > is enabled, to indicate that the IMA measurement log information is > present in the device tree for ARM64. > > Co-developed-by: Prakhar Srivastava > Signed-off-by: Prakhar Srivastava > Signed-off-by: Lakshmi Ramasubramanian > Reviewed-by: Thiago Jung Bauermann > --- > arch/arm64/Kconfig | 1 + > arch/arm64/include/asm/kexec.h | 5 + > 2 files changed, 6 insertions(+) > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > index 1d466addb078..ea7f7fe3dccd 100644 > --- a/arch/arm64/Kconfig > +++ b/arch/arm64/Kconfig > @@ -1094,6 +1094,7 @@ config KEXEC > config KEXEC_FILE > bool "kexec file based system call" > select KEXEC_CORE > +select HAVE_IMA_KEXEC if IMA > help > This is new version of kexec system call. This system call is > file based and takes file descriptors as system call argument > diff --git a/arch/arm64/include/asm/kexec.h > b/arch/arm64/include/asm/kexec.h > index d24b527e8c00..2bd19ccb6c43 100644 > --- a/arch/arm64/include/asm/kexec.h > +++ b/arch/arm64/include/asm/kexec.h > @@ -100,6 +100,11 @@ struct kimage_arch { > void *elf_headers; > unsigned long elf_headers_mem; > unsigned long elf_headers_sz; > + > +#ifdef CONFIG_IMA_KEXEC > +phys_addr_t ima_buffer_addr; > +size_t ima_buffer_size; > +#endif > >>> > >>> Why do these need to be in the arch structure instead of 'struct kimage'? 
> >>> > >> > >> Currently, only powerpc and, with this patch set, arm64 have support for > >> carrying forward IMA measurement list across kexec system call. The above > >> fields are used for tracking IMA measurement list. > >> > >> Do you see a reason to move these fields to "struct kimage"? > > > > If they're gated on CONFIG_IMA_KEXEC, then it seems harmless for them to > > be added to the shared structure. Or are you saying that there are > > architectures which have CONFIG_IMA_KEXEC but do not want these fields? > > > > As far as I know, there are no other architectures that define > CONFIG_IMA_KEXEC, but do not use these fields. Yes, CONFIG_IMA_KEXEC enables "carrying the IMA measurement list across a soft boot". The only arch that currently carries the IMA measurement across kexec is powerpc. Mimi
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On 1/27/21 10:02 AM, Will Deacon wrote: On Wed, Jan 27, 2021 at 09:56:53AM -0800, Lakshmi Ramasubramanian wrote: On 1/27/21 8:54 AM, Will Deacon wrote: On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: Address and size of the buffer containing the IMA measurement log need to be passed from the current kernel to the next kernel on kexec. Add address and size fields to "struct kimage_arch" for ARM64 platform to hold the address and size of the IMA measurement log buffer. Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA is enabled, to indicate that the IMA measurement log information is present in the device tree for ARM64. Co-developed-by: Prakhar Srivastava Signed-off-by: Prakhar Srivastava Signed-off-by: Lakshmi Ramasubramanian Reviewed-by: Thiago Jung Bauermann --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/kexec.h | 5 + 2 files changed, 6 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1d466addb078..ea7f7fe3dccd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1094,6 +1094,7 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE + select HAVE_IMA_KEXEC if IMA help This is new version of kexec system call. This system call is file based and takes file descriptors as system call argument diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index d24b527e8c00..2bd19ccb6c43 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -100,6 +100,11 @@ struct kimage_arch { void *elf_headers; unsigned long elf_headers_mem; unsigned long elf_headers_sz; + +#ifdef CONFIG_IMA_KEXEC + phys_addr_t ima_buffer_addr; + size_t ima_buffer_size; +#endif Why do these need to be in the arch structure instead of 'struct kimage'? Currently, only powerpc and, with this patch set, arm64 have support for carrying forward IMA measurement list across kexec system call. 
The above fields are used for tracking IMA measurement list. Do you see a reason to move these fields to "struct kimage"? If they're gated on CONFIG_IMA_KEXEC, then it seems harmless for them to be added to the shared structure. Or are you saying that there are architectures which have CONFIG_IMA_KEXEC but do not want these fields? As far as I know, there are no other architectures that define CONFIG_IMA_KEXEC, but do not use these fields. Mimi - please correct me if I am wrong. thanks, -lakshmi
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On Wed, Jan 27, 2021 at 09:56:53AM -0800, Lakshmi Ramasubramanian wrote: > On 1/27/21 8:54 AM, Will Deacon wrote: > > On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: > > > Address and size of the buffer containing the IMA measurement log need > > > to be passed from the current kernel to the next kernel on kexec. > > > > > > Add address and size fields to "struct kimage_arch" for ARM64 platform > > > to hold the address and size of the IMA measurement log buffer. > > > > > > Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA > > > is enabled, to indicate that the IMA measurement log information is > > > present in the device tree for ARM64. > > > > > > Co-developed-by: Prakhar Srivastava > > > Signed-off-by: Prakhar Srivastava > > > Signed-off-by: Lakshmi Ramasubramanian > > > Reviewed-by: Thiago Jung Bauermann > > > --- > > > arch/arm64/Kconfig | 1 + > > > arch/arm64/include/asm/kexec.h | 5 + > > > 2 files changed, 6 insertions(+) > > > > > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > > > index 1d466addb078..ea7f7fe3dccd 100644 > > > --- a/arch/arm64/Kconfig > > > +++ b/arch/arm64/Kconfig > > > @@ -1094,6 +1094,7 @@ config KEXEC > > > config KEXEC_FILE > > > bool "kexec file based system call" > > > select KEXEC_CORE > > > + select HAVE_IMA_KEXEC if IMA > > > help > > > This is new version of kexec system call. 
This system call is > > > file based and takes file descriptors as system call argument > > > diff --git a/arch/arm64/include/asm/kexec.h > > > b/arch/arm64/include/asm/kexec.h > > > index d24b527e8c00..2bd19ccb6c43 100644 > > > --- a/arch/arm64/include/asm/kexec.h > > > +++ b/arch/arm64/include/asm/kexec.h > > > @@ -100,6 +100,11 @@ struct kimage_arch { > > > void *elf_headers; > > > unsigned long elf_headers_mem; > > > unsigned long elf_headers_sz; > > > + > > > +#ifdef CONFIG_IMA_KEXEC > > > + phys_addr_t ima_buffer_addr; > > > + size_t ima_buffer_size; > > > +#endif > > > > Why do these need to be in the arch structure instead of 'struct kimage'? > > > > Currently, only powerpc and, with this patch set, arm64 have support for > carrying forward IMA measurement list across kexec system call. The above > fields are used for tracking IMA measurement list. > > Do you see a reason to move these fields to "struct kimage"? If they're gated on CONFIG_IMA_KEXEC, then it seems harmless for them to be added to the shared structure. Or are you saying that there are architectures which have CONFIG_IMA_KEXEC but do not want these fields? Will
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On 1/27/21 8:54 AM, Will Deacon wrote: Hi Will, On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: Address and size of the buffer containing the IMA measurement log need to be passed from the current kernel to the next kernel on kexec. Add address and size fields to "struct kimage_arch" for ARM64 platform to hold the address and size of the IMA measurement log buffer. Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA is enabled, to indicate that the IMA measurement log information is present in the device tree for ARM64. Co-developed-by: Prakhar Srivastava Signed-off-by: Prakhar Srivastava Signed-off-by: Lakshmi Ramasubramanian Reviewed-by: Thiago Jung Bauermann --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/kexec.h | 5 + 2 files changed, 6 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1d466addb078..ea7f7fe3dccd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1094,6 +1094,7 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE + select HAVE_IMA_KEXEC if IMA help This is new version of kexec system call. This system call is file based and takes file descriptors as system call argument diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index d24b527e8c00..2bd19ccb6c43 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -100,6 +100,11 @@ struct kimage_arch { void *elf_headers; unsigned long elf_headers_mem; unsigned long elf_headers_sz; + +#ifdef CONFIG_IMA_KEXEC + phys_addr_t ima_buffer_addr; + size_t ima_buffer_size; +#endif Why do these need to be in the arch structure instead of 'struct kimage'? Currently, only powerpc and, with this patch set, arm64 have support for carrying forward IMA measurement list across kexec system call. The above fields are used for tracking IMA measurement list. Do you see a reason to move these fields to "struct kimage"? thanks, -lakshmi
Re: [PATCH] PCI: dwc: layerscape: convert to builtin_platform_driver()
Hi Saravana, On Wed, Jan 27, 2021 at 5:42 PM Saravana Kannan wrote: > On Tue, Jan 26, 2021 at 11:43 PM Geert Uytterhoeven > wrote: > > On Wed, Jan 27, 2021 at 1:44 AM Saravana Kannan > > wrote: > > > On Tue, Jan 26, 2021 at 12:50 AM Geert Uytterhoeven > > > wrote: > > > > On Mon, Jan 25, 2021 at 11:42 PM Saravana Kannan > > > > wrote: > > > > > On Mon, Jan 25, 2021 at 11:49 AM Michael Walle > > > > > wrote: > > > > > > Am 2021-01-21 12:01, schrieb Geert Uytterhoeven: > > > > > > > On Thu, Jan 21, 2021 at 1:05 AM Saravana Kannan > > > > > > > > > > > > > > wrote: > > > > > > >> On Wed, Jan 20, 2021 at 3:53 PM Michael Walle > > > > > > >> wrote: > > > > > > >> > Am 2021-01-20 20:47, schrieb Saravana Kannan: > > > > > > >> > > On Wed, Jan 20, 2021 at 11:28 AM Michael Walle > > > > > > >> > > > > > > > > >> > > wrote: > > > > > > >> > >> > > > > > > >> > >> [RESEND, fat-fingered the buttons of my mail client and > > > > > > >> > >> converted > > > > > > >> > >> all CCs to BCCs :(] > > > > > > >> > >> > > > > > > >> > >> Am 2021-01-20 20:02, schrieb Saravana Kannan: > > > > > > >> > >> > On Wed, Jan 20, 2021 at 6:24 AM Rob Herring > > > > > > >> > >> > wrote: > > > > > > >> > >> >> > > > > > > >> > >> >> On Wed, Jan 20, 2021 at 4:53 AM Michael Walle > > > > > > >> > >> >> > > > > > > >> > >> >> wrote: > > > > > > >> > >> >> > > > > > > > >> > >> >> > fw_devlink will defer the probe until all suppliers > > > > > > >> > >> >> > are ready. We can't > > > > > > >> > >> >> > use builtin_platform_driver_probe() because it doesn't > > > > > > >> > >> >> > retry after probe > > > > > > >> > >> >> > deferral. Convert it to builtin_platform_driver(). > > > > > > >> > >> >> > > > > > > >> > >> >> If builtin_platform_driver_probe() doesn't work with > > > > > > >> > >> >> fw_devlink, then > > > > > > >> > >> >> shouldn't it be fixed or removed? > > > > > > >> > >> > > > > > > > >> > >> > I was actually thinking about this too. 
The problem with > > > > > > >> > >> > fixing > > > > > > >> > >> > builtin_platform_driver_probe() to behave like > > > > > > >> > >> > builtin_platform_driver() is that these probe functions > > > > > > >> > >> > could be > > > > > > >> > >> > marked with __init. But there are also only 20 instances > > > > > > >> > >> > of > > > > > > >> > >> > builtin_platform_driver_probe() in the kernel: > > > > > > >> > >> > $ git grep ^builtin_platform_driver_probe | wc -l > > > > > > >> > >> > 20 > > > > > > >> > >> > > > > > > > >> > >> > So it might be easier to just fix them to not use > > > > > > >> > >> > builtin_platform_driver_probe(). > > > > > > >> > >> > > > > > > > >> > >> > Michael, > > > > > > >> > >> > > > > > > > >> > >> > Any chance you'd be willing to help me by converting all > > > > > > >> > >> > these to > > > > > > >> > >> > builtin_platform_driver() and delete > > > > > > >> > >> > builtin_platform_driver_probe()? > > > > > > >> > >> > > > > > > >> > >> If it just moving the probe function to the _driver struct > > > > > > >> > >> and > > > > > > >> > >> remove the __init annotations. I could look into that. > > > > > > >> > > > > > > > > >> > > Yup. That's pretty much it AFAICT. > > > > > > >> > > > > > > > > >> > > builtin_platform_driver_probe() also makes sure the driver > > > > > > >> > > doesn't ask > > > > > > >> > > for async probe, etc. But I doubt anyone is actually setting > > > > > > >> > > async > > > > > > >> > > flags and still using builtin_platform_driver_probe(). > > > > > > >> > > > > > > > >> > Hasn't module_platform_driver_probe() the same problem? And > > > > > > >> > there > > > > > > >> > are ~80 drivers which uses that. > > > > > > >> > > > > > > >> Yeah. The biggest problem with all of these is the __init > > > > > > >> markers. > > > > > > >> Maybe some familiar with coccinelle can help? > > > > > > > > > > > > > > And dropping them will increase memory usage. 
> > > > > > > > > > > > Although I do have the changes for the > > > > > > builtin_platform_driver_probe() > > > > > > ready, I don't think it makes much sense to send these unless we > > > > > > agree > > > > > > on the increased memory footprint. While there are just a few > > > > > > builtin_platform_driver_probe() and memory increase _might_ be > > > > > > negligible, there are many more module_platform_driver_probe(). > > > > > > > > > > While it's good to drop code that'll not be used past kernel init, the > > > > > module_platform_driver_probe() is going even more extreme. It doesn't > > > > > even allow deferred probe (well before kernel init is done). I don't > > > > > think that behavior is right and that's why we should delete it. Also, > > > > > > > > This construct is typically used for builtin hardware for which the > > > > dependencies are registered very early, and thus known to probe at > > > > first try (if present). > > > > > > > > > I doubt if any of these probe functions even take up 4KB of memory. > > > > > > > > How many 4 KiB pages do you have in
Re: [PATCH v15 10/10] arm64: Add IMA log information in kimage used for kexec
On Fri, Jan 15, 2021 at 09:30:17AM -0800, Lakshmi Ramasubramanian wrote: > Address and size of the buffer containing the IMA measurement log need > to be passed from the current kernel to the next kernel on kexec. > > Add address and size fields to "struct kimage_arch" for ARM64 platform > to hold the address and size of the IMA measurement log buffer. > > Update CONFIG_KEXEC_FILE to select CONFIG_HAVE_IMA_KEXEC, if CONFIG_IMA > is enabled, to indicate that the IMA measurement log information is > present in the device tree for ARM64. > > Co-developed-by: Prakhar Srivastava > Signed-off-by: Prakhar Srivastava > Signed-off-by: Lakshmi Ramasubramanian > Reviewed-by: Thiago Jung Bauermann > --- > arch/arm64/Kconfig | 1 + > arch/arm64/include/asm/kexec.h | 5 + > 2 files changed, 6 insertions(+) > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > index 1d466addb078..ea7f7fe3dccd 100644 > --- a/arch/arm64/Kconfig > +++ b/arch/arm64/Kconfig > @@ -1094,6 +1094,7 @@ config KEXEC > config KEXEC_FILE > bool "kexec file based system call" > select KEXEC_CORE > + select HAVE_IMA_KEXEC if IMA > help > This is new version of kexec system call. This system call is > file based and takes file descriptors as system call argument > diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h > index d24b527e8c00..2bd19ccb6c43 100644 > --- a/arch/arm64/include/asm/kexec.h > +++ b/arch/arm64/include/asm/kexec.h > @@ -100,6 +100,11 @@ struct kimage_arch { > void *elf_headers; > unsigned long elf_headers_mem; > unsigned long elf_headers_sz; > + > +#ifdef CONFIG_IMA_KEXEC > + phys_addr_t ima_buffer_addr; > + size_t ima_buffer_size; > +#endif Why do these need to be in the arch structure instead of 'struct kimage'? Will
Re: [PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
Le 27/01/2021 à 15:56, Zorro Lang a écrit : On powerpc, io_uring test hit below KUAP fault on __do_page_fault. The fail source line is: if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) return SIGSEGV; The is_user() is based on user_mode(regs) only. This is not suitable for io_uring, where the helper thread can assume the user app identity and could perform this fault just fine. So turn to use mm to decide if this is valid or not. I don't understand why testing is_user would be an issue. KUAP's purpose is to block any unallowed access from kernel to user memory (equivalent to SMAP on x86). So it really must be based on the MSR_PR bit, which is what is_user provides. If the kernel access is legitimate, the kernel should have opened userspace access, then you shouldn't get this "Bug: Read fault blocked by KUAP!". As far as I understand, the fault occurs in iov_iter_fault_in_readable() which calls fault_in_pages_readable(). And fault_in_pages_readable() uses __get_user() so it is a legitimate access and you really should get a KUAP fault. So the problem is somewhere else, I think your proposed patch just hides the problem, it doesn't fix it. Can you provide your vmlinux binary together with your .config ? [ 556.472666] [ cut here ] [ 556.472686] Bug: Read fault blocked by KUAP! 
[ 556.472697] WARNING: CPU: 1 PID: 101841 at arch/powerpc/mm/fault.c:229 __do_page_fault+0x6b4/0xcd0 [ 556.472728] Modules linked in: bonding rfkill sunrpc pseries_rng xts uio_pdrv_genirq vmx_crypto uio ip_tables xfs libcrc32c sd_mod t10_pi sg ibmvscsi ibmveth scsi_transport_srp [ 556.472816] CPU: 1 PID: 101841 Comm: io_wqe_worker-0 Tainted: GW 5.11.0-rc3+ #2 [ 556.472830] NIP: c009e7e4 LR: c009e7e0 CTR: [ 556.472842] REGS: c00016367090 TRAP: 0700 Tainted: GW (5.11.0-rc3+) [ 556.472853] MSR: 80021033 CR: 48022424 XER: 0001 [ 556.472901] CFAR: c01822ac IRQMASK: 1 GPR00: c009e7e0 c00016367330 c23fc300 0020 GPR04: c1e3c2b8 0001 0027 c007fb90 GPR08: 0023 c00024ed0900 fc464a58 GPR12: 2000 c0001ecaf280 c01caee8 c00014d547c0 GPR16: c2454018 GPR20: c1336480 bfff cb0e5800 GPR24: a8aa 0020 c0002cc38880 GPR28: 01000e3c9310 c13424c0 c000163674a0 c1e0d2c0 [ 556.473125] NIP [c009e7e4] __do_page_fault+0x6b4/0xcd0 [ 556.473139] LR [c009e7e0] __do_page_fault+0x6b0/0xcd0 [ 556.473152] Call Trace: [ 556.473168] [c00016367330] [c009e7e0] __do_page_fault+0x6b0/0xcd0 (unreliable) [ 556.473198] [c000163673e0] [c009ee3c] do_page_fault+0x3c/0x120 [ 556.473216] [c00016367430] [c000c848] handle_page_fault+0x10/0x2c [ 556.473232] --- interrupt: 300 at iov_iter_fault_in_readable+0x148/0x6f0 [ 556.473245] NIP: c08e8228 LR: c08e834c CTR: [ 556.473257] REGS: c000163674a0 TRAP: 0300 Tainted: GW (5.11.0-rc3+) [ 556.473268] MSR: 8280b033 CR: 44008482 XER: 0001 [ 556.473339] CFAR: c08e81f0 DAR: 01000e3c9310 DSISR: 0020 IRQMASK: 0 GPR00: c08e834c c00016367740 c23fc300 GPR04: c0002cc389e0 0001 0007fa4b c25bc520 GPR08: 0007fa4b 0200 fcff ffea2ad8 GPR12: 8000 c0001ecaf280 c01caee8 c00014d547c0 GPR16: c2454018 GPR20: c1336480 bfff cb0e5800 GPR24: a8aa fcff 04b1 04b1 GPR28: cb0e5888 01000e3c97c0 01000e3c9310 [ 556.473667] NIP [c08e8228] iov_iter_fault_in_readable+0x148/0x6f0 [ 556.473688] LR [c08e834c] iov_iter_fault_in_readable+0x26c/0x6f0 [ 556.473708] --- interrupt: 300 [ 556.473722] [c000163677e0] 
[c07154a0] iomap_write_actor+0xc0/0x280 [ 556.473770] [c00016367880] [c070fc94] iomap_apply+0x1c4/0x780 [ 556.473804] [c00016367990] [c0710330] iomap_file_buffered_write+0xa0/0x120 [ 556.473839] [c000163679e0] [c0080040791c] xfs_file_buffered_aio_write+0x314/0x5e0 [xfs] [ 556.474053] [c00016367a90] [c06d74bc] io_write+0x10c/0x460 [ 556.474101] [c00016367bb0] [c06
[PATCH] powerpc/fault: fix wrong KUAP fault for IO_URING
On powerpc, io_uring test hit below KUAP fault on __do_page_fault. The fail source line is: if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) return SIGSEGV; The is_user() is based on user_mod(regs) only. This's not suit for io_uring, where the helper thread can assume the user app identity and could perform this fault just fine. So turn to use mm to decide if this is valid or not. [ 556.472666] [ cut here ] [ 556.472686] Bug: Read fault blocked by KUAP! [ 556.472697] WARNING: CPU: 1 PID: 101841 at arch/powerpc/mm/fault.c:229 __do_page_fault+0x6b4/0xcd0 [ 556.472728] Modules linked in: bonding rfkill sunrpc pseries_rng xts uio_pdrv_genirq vmx_crypto uio ip_tables xfs libcrc32c sd_mod t10_pi sg ibmvscsi ibmveth scsi_transport_srp [ 556.472816] CPU: 1 PID: 101841 Comm: io_wqe_worker-0 Tainted: GW 5.11.0-rc3+ #2 [ 556.472830] NIP: c009e7e4 LR: c009e7e0 CTR: [ 556.472842] REGS: c00016367090 TRAP: 0700 Tainted: GW (5.11.0-rc3+) [ 556.472853] MSR: 80021033 CR: 48022424 XER: 0001 [ 556.472901] CFAR: c01822ac IRQMASK: 1 GPR00: c009e7e0 c00016367330 c23fc300 0020 GPR04: c1e3c2b8 0001 0027 c007fb90 GPR08: 0023 c00024ed0900 fc464a58 GPR12: 2000 c0001ecaf280 c01caee8 c00014d547c0 GPR16: c2454018 GPR20: c1336480 bfff cb0e5800 GPR24: a8aa 0020 c0002cc38880 GPR28: 01000e3c9310 c13424c0 c000163674a0 c1e0d2c0 [ 556.473125] NIP [c009e7e4] __do_page_fault+0x6b4/0xcd0 [ 556.473139] LR [c009e7e0] __do_page_fault+0x6b0/0xcd0 [ 556.473152] Call Trace: [ 556.473168] [c00016367330] [c009e7e0] __do_page_fault+0x6b0/0xcd0 (unreliable) [ 556.473198] [c000163673e0] [c009ee3c] do_page_fault+0x3c/0x120 [ 556.473216] [c00016367430] [c000c848] handle_page_fault+0x10/0x2c [ 556.473232] --- interrupt: 300 at iov_iter_fault_in_readable+0x148/0x6f0 [ 556.473245] NIP: c08e8228 LR: c08e834c CTR: [ 556.473257] REGS: c000163674a0 TRAP: 0300 Tainted: GW (5.11.0-rc3+) [ 556.473268] MSR: 8280b033 CR: 44008482 XER: 0001 [ 556.473339] CFAR: c08e81f0 DAR: 01000e3c9310 DSISR: 0020 
IRQMASK: 0 GPR00: c08e834c c00016367740 c23fc300 GPR04: c0002cc389e0 0001 0007fa4b c25bc520 GPR08: 0007fa4b 0200 fcff ffea2ad8 GPR12: 8000 c0001ecaf280 c01caee8 c00014d547c0 GPR16: c2454018 GPR20: c1336480 bfff cb0e5800 GPR24: a8aa fcff 04b1 04b1 GPR28: cb0e5888 01000e3c97c0 01000e3c9310 [ 556.473667] NIP [c08e8228] iov_iter_fault_in_readable+0x148/0x6f0 [ 556.473688] LR [c08e834c] iov_iter_fault_in_readable+0x26c/0x6f0 [ 556.473708] --- interrupt: 300 [ 556.473722] [c000163677e0] [c07154a0] iomap_write_actor+0xc0/0x280 [ 556.473770] [c00016367880] [c070fc94] iomap_apply+0x1c4/0x780 [ 556.473804] [c00016367990] [c0710330] iomap_file_buffered_write+0xa0/0x120 [ 556.473839] [c000163679e0] [c0080040791c] xfs_file_buffered_aio_write+0x314/0x5e0 [xfs] [ 556.474053] [c00016367a90] [c06d74bc] io_write+0x10c/0x460 [ 556.474101] [c00016367bb0] [c06d80e4] io_issue_sqe+0x8d4/0x1200 [ 556.474132] [c00016367c70] [c06d8ad0] io_wq_submit_work+0xc0/0x250 [ 556.474161] [c00016367cb0] [c06e2578] io_worker_handle_work+0x498/0x800 [ 556.474192] [c00016367d40] [c06e2cdc] io_wqe_worker+0x3fc/0x4f0 [ 556.474223] [c00016367da0] [c01cb0a4] kthread+0x1c4/0x1d0 [ 556.474254] [c00016367e10] [c000dbf0] ret_from_kernel_thread+0x5c/0x6c [ 556.474286] Instruction dump: [ 556.474310] e87e0100 481287f1 6000 2fa3 419e01ec 408e0400 3c82fef4 388461d0 [ 556.474395] 3c62fef4 386362d0 480e3a69 6000 <0fe0> 386b 4bfffa08 3d220006 [ 556.474479] irq event stamp: 1280 [ 556.474505] hardirqs last enabled at (1279): [] __slab_free+0x3e4/0x570 [ 556.474540] hardirqs last disabled at (1280): [] data_access_common_virt+0x1a4/0x1c0
Re: [PATCH v11 13/13] powerpc/64s/radix: Enable huge vmalloc mappings
Nicholas Piggin writes: > Cc: linuxppc-dev@lists.ozlabs.org > Signed-off-by: Nicholas Piggin > --- Acked-by: Michael Ellerman cheers > .../admin-guide/kernel-parameters.txt | 2 ++ > arch/powerpc/Kconfig | 1 + > arch/powerpc/kernel/module.c | 21 +++ > 3 files changed, 20 insertions(+), 4 deletions(-) > > diff --git a/Documentation/admin-guide/kernel-parameters.txt > b/Documentation/admin-guide/kernel-parameters.txt > index a10b545c2070..d62df53e5200 100644 > --- a/Documentation/admin-guide/kernel-parameters.txt > +++ b/Documentation/admin-guide/kernel-parameters.txt > @@ -3225,6 +3225,8 @@ > > nohugeiomap [KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings. > > + nohugevmalloc [PPC] Disable kernel huge vmalloc mappings. > + > nosmt [KNL,S390] Disable symmetric multithreading (SMT). > Equivalent to smt=1. > > diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig > index 107bb4319e0e..781da6829ab7 100644 > --- a/arch/powerpc/Kconfig > +++ b/arch/powerpc/Kconfig > @@ -181,6 +181,7 @@ config PPC > select GENERIC_GETTIMEOFDAY > select HAVE_ARCH_AUDITSYSCALL > select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && > PPC_RADIX_MMU > + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP > select HAVE_ARCH_JUMP_LABEL > select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 > select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14 > diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c > index a211b0253cdb..07026335d24d 100644 > --- a/arch/powerpc/kernel/module.c > +++ b/arch/powerpc/kernel/module.c > @@ -87,13 +87,26 @@ int module_finalize(const Elf_Ehdr *hdr, > return 0; > } > > -#ifdef MODULES_VADDR > void *module_alloc(unsigned long size) > { > + unsigned long start = VMALLOC_START; > + unsigned long end = VMALLOC_END; > + > +#ifdef MODULES_VADDR > BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); > + start = MODULES_VADDR; > + end = MODULES_END; > +#endif > + > + /* > + * Don't do huge page allocations for modules yet until more testing > + * is done. 
STRICT_MODULE_RWX may require extra work to support this > + * too. > + */ > > - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, > GFP_KERNEL, > - PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, > NUMA_NO_NODE, > + return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, > + PAGE_KERNEL_EXEC, > + VM_NO_HUGE_VMAP | VM_FLUSH_RESET_PERMS, > + NUMA_NO_NODE, > __builtin_return_address(0)); > } > -#endif > -- > 2.23.0