On Sat, 2014-07-26 at 10:52 +0100, Robert de Bath wrote:
> On Fri, 25 Jul 2014, Robert de Bath wrote:
> 
> > On Fri, 25 Jul 2014, Ben Hutchings wrote:
> >
> >> I had an idea how to unblock this, and finally got round to trying it,
> >> and it seems to work.  That is, we build in x32 support but require a
> >> run-time parameter to enable.  So, please try the attached patch
> >> (against the sid branch), adding "syscall.x32=y" to the kernel command
> >> line.
> Okay,
> With the flag set the kernel boots happily and runs gcc-mx32 executables.
> 
> With the flag off ...
> First, a simple thing: with the patch applied there is no configuration
> option to default the x32 switch to on. Thinking ahead to when this has
> been well tested, it would be nice to have a .config option that enables
> the x32 syscalls by default, plus a kernel command line option to
> disable them in "special cases".
> 
> More importantly ...  this is rather ugly; I think you're going to get
> complaints when ld.so segfaults.
> 
> You may want to reinstate the ENOEXEC error for the 'wrong sort' of 
> executables.
[...]

What do you mean, reinstate?  This is the same behaviour you get at
present.  Anyway, here's a new version that should do that: with the
flag off, compat_elf_check_arch() rejects x32 executables, so execve()
fails with ENOEXEC instead of ld.so crashing.
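
If you want a quick way to check the toggle itself from an ordinary
amd64 binary (no x32 toolchain needed), something like the untested
sketch below should do; X32_SYSCALL_BIT here is just a local copy of the
kernel's x32 marker bit (0x40000000).  With the mask instruction NOP'd
out the number is out of range and the call fails with ENOSYS; with
syscall.x32=y the bit is masked off and getpid() succeeds:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define X32_SYSCALL_BIT	0x40000000L	/* kernel's x32 marker bit */

	int main(void)
	{
		/* getpid with the x32 bit set: out of range (ENOSYS) unless
		 * the mask instruction has been patched in at boot. */
		long ret = syscall(X32_SYSCALL_BIT | SYS_getpid);

		if (ret >= 0)
			printf("x32 syscalls enabled (pid %ld)\n", ret);
		else if (errno == ENOSYS)
			printf("x32 syscalls disabled (ENOSYS)\n");
		else
			perror("syscall");
		return 0;
	}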

Ben.

-- 
Ben Hutchings
73.46% of all statistics are made up.
Index: linux/debian/changelog
===================================================================
--- linux/debian/changelog	(revision 21631)
+++ linux/debian/changelog	(working copy)
@@ -1,3 +1,11 @@
+linux (3.14.13-2+x32) UNRELEASED; urgency=medium
+
+  [ Ben Hutchings ]
+  * [amd64] Enable X86_X32 (Closes: #708070)
+  * [amd64] syscall: Make x32 syscall support conditional on a kernel parameter
+
+ -- Ben Hutchings <b...@decadent.org.uk>  Fri, 25 Jul 2014 01:48:06 +0100
+
 linux (3.14.13-2) unstable; urgency=medium
 
   [ Aurelien Jarno ]
Index: linux/debian/config/kernelarch-x86/config-arch-64
===================================================================
--- linux/debian/config/kernelarch-x86/config-arch-64	(revision 21631)
+++ linux/debian/config/kernelarch-x86/config-arch-64	(working copy)
@@ -15,7 +15,7 @@
 CONFIG_NUMA_EMU=y
 CONFIG_PCI_MMCONFIG=y
 CONFIG_ISA_DMA_API=y
-# CONFIG_X86_X32 is not set
+CONFIG_X86_X32=y
 
 ##
 ## file: arch/x86/Kconfig.cpu
Index: linux/debian/patches/bugfix/x86/x86-reject-x32-executables-if-x32-abi-not-supported.patch
===================================================================
--- linux/debian/patches/bugfix/x86/x86-reject-x32-executables-if-x32-abi-not-supported.patch	(revision 0)
+++ linux/debian/patches/bugfix/x86/x86-reject-x32-executables-if-x32-abi-not-supported.patch	(working copy)
@@ -0,0 +1,18 @@
+From: Ben Hutchings <b...@decadent.org.uk>
+Date: Sat, 26 Jul 2014 15:03:11 +0100
+Subject: x86: Reject x32 executables if x32 ABI not supported
+
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -155,8 +155,9 @@ do {						\
+ #define elf_check_arch(x)			\
+ 	((x)->e_machine == EM_X86_64)
+ 
+-#define compat_elf_check_arch(x)		\
+-	(elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
++#define compat_elf_check_arch(x)					\
++	(elf_check_arch_ia32(x) ||					\
++	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+ 
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
Index: linux/debian/patches/features/x86/x86-syscall-make-x32-syscall-support-conditional.patch
===================================================================
--- linux/debian/patches/features/x86/x86-syscall-make-x32-syscall-support-conditional.patch	(revision 0)
+++ linux/debian/patches/features/x86/x86-syscall-make-x32-syscall-support-conditional.patch	(working copy)
@@ -0,0 +1,164 @@
+From: Ben Hutchings <b...@decadent.org.uk>
+Date: Fri, 25 Jul 2014 01:16:15 +0100
+Subject: x86/syscall: Make x32 syscall support conditional on a kernel parameter
+Bug-Debian: https://bugs.debian.org/708070
+
+Enabling x32 in the standard amd64 kernel would increase its attack
+surface while providing no benefit to the vast majority of its users.
+No-one seems interested in regularly checking for vulnerabilities
+specific to x32 (at least no-one with a white hat).
+
+Still, adding another flavour just to turn on x32 seems wasteful.  And
+the only difference on syscall entry is whether we mask the x32 flag
+out of the syscall number before range-checking it.
+
+So replace the mask (andl) instruction with a nop and add a kernel
+parameter "syscall.x32" which allows it to be turned back on again.
+
+Change the comparison instruction to cmpq because the upper 32 bits
+may or may not be cleared by the previous instruction.
+
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -618,12 +618,14 @@ GLOBAL(system_call_after_swapgs)
+ 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ 	jnz tracesys
+ system_call_fastpath:
+-#if __SYSCALL_MASK == ~0
+-	cmpq $__NR_syscall_max,%rax
+-#else
+-	andl $__SYSCALL_MASK,%eax
+-	cmpl $__NR_syscall_max,%eax
++#if __SYSCALL_MASK != ~0
++	.globl system_call_fast_maybe_mask
++	.globl system_call_fast_masked
++system_call_fast_maybe_mask:
++	.byte P6_NOP5_ATOMIC
++system_call_fast_masked:
+ #endif
++	cmpq $__NR_syscall_max,%rax
+ 	ja badsys
+ 	movq %r10,%rcx
+ 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
+@@ -737,12 +739,14 @@ tracesys:
+ 	 */
+ 	LOAD_ARGS ARGOFFSET, 1
+ 	RESTORE_REST
+-#if __SYSCALL_MASK == ~0
+-	cmpq $__NR_syscall_max,%rax
+-#else
+-	andl $__SYSCALL_MASK,%eax
+-	cmpl $__NR_syscall_max,%eax
++#if __SYSCALL_MASK != ~0
++	.globl system_call_trace_maybe_mask
++	.globl system_call_trace_masked
++system_call_trace_maybe_mask:
++	.byte P6_NOP5_ATOMIC
++system_call_trace_masked:
+ #endif
++	cmpq $__NR_syscall_max,%rax
+ 	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
+ 	movq %r10,%rcx	/* fixup for C */
+ 	call *sys_call_table(,%rax,8)
+@@ -813,6 +817,18 @@ int_restore_rest:
+ 	CFI_ENDPROC
+ END(system_call)
+ 
++#if __SYSCALL_MASK != ~0
++	/*
++	 * This replaces the nops before the syscall range check
++	 * if syscall.x32 is set
++	 */
++	.globl system_call_mask
++	.globl system_call_mask_end
++system_call_mask:
++	andl $__SYSCALL_MASK,%eax
++system_call_mask_end:
++#endif
++
+ 	.macro FORK_LIKE func
+ ENTRY(stub_\func)
+ 	CFI_STARTPROC
+--- a/arch/x86/kernel/syscall_64.c
++++ b/arch/x86/kernel/syscall_64.c
+@@ -3,8 +3,13 @@
+ #include <linux/linkage.h>
+ #include <linux/sys.h>
+ #include <linux/cache.h>
++#include <linux/moduleparam.h>
++#undef MODULE_PARAM_PREFIX
++#define MODULE_PARAM_PREFIX "syscall."
++#include <linux/bug.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/syscall.h>
++#include <asm/alternative.h>
+ 
+ #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+ 
+@@ -30,3 +35,42 @@ asmlinkage const sys_call_ptr_t sys_call
+ 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
+ #include <asm/syscalls_64.h>
+ };
++
++#ifdef CONFIG_X86_X32_ABI
++
++/* Maybe enable x32 syscalls */
++
++bool x32_enabled = false;
++
++extern char system_call_fast_masked[], system_call_fast_maybe_mask[],
++	system_call_trace_masked[], system_call_trace_maybe_mask[],
++	system_call_mask_end[], system_call_mask[];
++
++static int __init
++set_x32_enabled(const char *val, const struct kernel_param *kp)
++{
++	int ret = param_set_bool(val, kp);
++
++	BUG_ON(system_call_fast_masked - system_call_fast_maybe_mask != 5);
++	BUG_ON(system_call_trace_masked - system_call_trace_maybe_mask != 5);
++	BUG_ON(system_call_mask_end - system_call_mask != 5);
++
++	if (x32_enabled) {
++		text_poke_early(system_call_fast_maybe_mask,
++				system_call_mask, 5);
++		text_poke_early(system_call_trace_maybe_mask,
++				system_call_mask, 5);
++		pr_info("Enabled x32 syscalls\n");
++	}
++
++	return ret;
++}
++
++static const struct kernel_param_ops x32_enabled_ops = {
++	.get = param_get_bool,
++	.set = set_x32_enabled
++};
++
++late_param_cb(x32, &x32_enabled_ops, &x32_enabled, 0444);
++
++#endif
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -149,6 +149,12 @@ do {						\
+ 
+ #else /* CONFIG_X86_32 */
+ 
++#ifdef CONFIG_X86_X32_ABI
++extern bool x32_enabled;
++#else
++#define x32_enabled 0
++#endif
++
+ /*
+  * This is used to ensure we don't load something for the wrong architecture.
+  */
+@@ -157,7 +163,7 @@ do {						\
+ 
+ #define compat_elf_check_arch(x)					\
+ 	(elf_check_arch_ia32(x) ||					\
+-	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
++	 (x32_enabled && (x)->e_machine == EM_X86_64))
+ 
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
Index: linux/debian/patches/series
===================================================================
--- linux/debian/patches/series	(revision 21631)
+++ linux/debian/patches/series	(working copy)
@@ -110,3 +110,5 @@
 features/mips/MIPS-Malta-hang-on-halt.patch
 features/mips/MIPS-Malta-support-powering-down.patch
 features/mips/MIPS-Loongson-3-Add-Loongson-LS3A-RS780E-1-way-machi.patch
+bugfix/x86/x86-reject-x32-executables-if-x32-abi-not-supported.patch
+features/x86/x86-syscall-make-x32-syscall-support-conditional.patch
