Enable unwinding of user space using back chain on s390. Based on arch_stack_walk_user_common() in arch/s390/kernel/stacktrace.c.
Note that an invalid RA obtained from the stack frame pointed to by the back chain is not a valid indication that the IP is still early in the function prologue, so no fallback to the RA and SP register (r14 and r15) contents is made in that case. Signed-off-by: Jens Remus <[email protected]> --- Notes (jremus): Changes in RFC v2: - Adjusted to latest unwind user changes. - Use struct stack_frame_user and struct stack_frame_vdso_wrapper from asm/stacktrace.h. - In topmost frame do not fall back to RA (and SP) register values if RA is invalid. This is not a valid indication for early prologue. - In topmost frame use RA and SP register values if they match those saved in the frame. This indicates early prologue. arch/s390/Kconfig | 1 + arch/s390/kernel/Makefile | 2 + arch/s390/kernel/unwind_user_backchain.c | 112 +++++++++++++++++++++++ 3 files changed, 115 insertions(+) create mode 100644 arch/s390/kernel/unwind_user_backchain.c diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 52d3f3b3e086..5aeb2abd390f 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -246,6 +246,7 @@ config S390 select HAVE_SETUP_PER_CPU_AREA select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SYSCALL_TRACEPOINTS + select HAVE_UNWIND_USER_BACKCHAIN select HAVE_UNWIND_USER_SFRAME select HAVE_VIRT_CPU_ACCOUNTING select HAVE_VIRT_CPU_ACCOUNTING_IDLE diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index eb06ff888314..eb662e95c5fd 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -83,6 +83,8 @@ obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o obj-$(CONFIG_TRACEPOINTS) += trace.o +obj-$(CONFIG_HAVE_UNWIND_USER_BACKCHAIN) += unwind_user_backchain.o + # vdso obj-y += vdso64/ obj-$(CONFIG_COMPAT) += vdso32/ diff --git a/arch/s390/kernel/unwind_user_backchain.c b/arch/s390/kernel/unwind_user_backchain.c new file mode 100644 index 000000000000..4e10ca43ea36 --- /dev/null +++ b/arch/s390/kernel/unwind_user_backchain.c @@ -0,0 +1,112 @@ +// 
SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "backchain: " fmt + +#include <asm/asm-offsets.h> +#include <asm/stacktrace.h> +#include <linux/security.h> +#include <linux/unwind_user.h> +#include <linux/unwind_user_backchain.h> + +/** + * ip_invalid - Perform some basic checks whether an instruction pointer (IP) + * taken from an unreliable source is invalid + * @ip: The instruction pointer to be validated + * + * returns whether the instruction pointer is invalid + */ +static inline bool ip_invalid(unsigned long ip) +{ + /* Architecture requires IP to be 2-byte aligned. */ + if (ip & 1) + return true; + if (ip < mmap_min_addr) + return true; + if (ip >= current->mm->context.asce_limit) + return true; + return false; +} + +/** + * ip_within_vdso - Check whether an instruction pointer (IP) is within vDSO + * @ip: The instruction pointer + * + * returns whether the instruction pointer is within vDSO + */ +static inline bool ip_within_vdso(unsigned long ip) +{ + return in_range(ip, current->mm->context.vdso_base, vdso_text_size()); +} + +/** + * arch_unwind_user_next_backchain - Unwind one frame using s390 back chain + * @state: The unwind user state + * + * returns zero when successful, otherwise -EINVAL. + */ +int arch_unwind_user_next_backchain(struct unwind_user_state *state) +{ + struct stack_frame_user __user *sf; + unsigned long sp, ra; + + sf = (void __user *)state->sp; + + /* + * In topmost frame check whether IP in early prologue, RA and SP + * registers saved, and no new stack frame allocated. 
+ */ + if (state->topmost) { + unsigned long ra_reg; + + if (__get_user(ra, (unsigned long __user *)&sf->gprs[8])) + return -EINVAL; + if (__get_user(sp, (unsigned long __user *)&sf->gprs[9])) + return -EINVAL; + if (unwind_user_get_ra_reg(&ra_reg)) + return -EINVAL; + if (ra == ra_reg && sp == state->sp) + goto done; + } + + if (__get_user(sp, (unsigned long __user *)&sf->back_chain)) + return -EINVAL; + if (!sp && ip_within_vdso(state->ip)) { + /* + * Assume non-standard vDSO user wrapper stack frame. + * See vDSO user wrapper code for details. + */ + struct stack_frame_vdso_wrapper *sf_vdso = (void __user *)sf; + + if (__get_user(ra, (unsigned long __user *)&sf_vdso->return_address)) + return -EINVAL; + sf = (void __user *)((unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD); + if (__get_user(sp, (unsigned long __user *)&sf->back_chain)) + return -EINVAL; + } else if (!sp) { + /* Assume outermost frame reached. */ + state->done = true; + return 0; + } else { + /* + * Assume IP past prologue and new stack frame allocated. + * Follow back chain, which then equals the SP at entry. + * Skips caller if wrong in topmost frame. + */ + sf = (void __user *)sp; + if (__get_user(ra, (unsigned long __user *)&sf->gprs[8])) + return -EINVAL; + /* Skip validation: ABI requires SP to be saved as well. */ + } + +done: + /* Validate SP and RA (ABI requires SP to be 8-byte aligned). */ + if (sp & 7 || ip_invalid(ra)) + return -EINVAL; + + state->ip = ra; + state->sp = sp; + state->fp = 0; /* Cannot unwind FP. */ + state->topmost = false; + + return 0; +} -- 2.51.0
