Re: [PATCH] mm: slub: re-initialize randomized freelist sequence in calculate_sizes

2020-08-19 Thread Thomas Garnier
 I am not super familiar with the sysfs setup here, but the random
sequence should work as expected. One minor comment below.

Reviewed-by: Thomas Garnier 

On Wed, Aug 19, 2020 at 1:26 PM Andrew Morton  wrote:
>
>
> (cc Thomas and linux-mm)
>
> On Sat,  8 Aug 2020 13:50:30 +0400 kpark3...@gmail.com wrote:
>
> > From: Sahara 
> >
> > Slab cache flags are exported to sysfs and are allowed to get modified
> > from userspace. Some of those writes may call the calculate_sizes()
> > function because the changed flag can have an effect on the slab object
> > size and layout, which means the kmem_cache may end up with a different
> > order and object count.
> > Freelist pointer corruption occurs if some slab flags are modified
> > while CONFIG_SLAB_FREELIST_RANDOM is turned on.
> >
> >  $ echo 0 > /sys/kernel/slab/zs_handle/store_user
> >  $ echo 0 > /sys/kernel/slab/zspage/store_user
> >  $ mkswap /dev/block/zram0
> >  $ swapon /dev/block/zram0 -p 32758
> >
> > =============================================================================
> >  BUG zs_handle (Not tainted): Freepointer corrupt
> > -----------------------------------------------------------------------------
> >
> >  Disabling lock debugging due to kernel taint
> >  INFO: Slab 0xffbf29603600 objects=102 used=102 fp=0x flags=0x0200
> >  INFO: Object 0xffca580d8d78 @offset=3448 fp=0xffca580d8ed0
> >
> >  Redzone f3cddd6c: bb bb bb bb bb bb bb bb  ........
> >  Object 82d5d74e: 6b 6b 6b 6b 6b 6b 6b a5  kkkkkkk.
> >  Redzone 8fd80359: bb bb bb bb bb bb bb bb  ........
> >  Padding c7f56047: 5a 5a 5a 5a 5a 5a 5a 5a  ZZZZZZZZ
> >
> > In this example, an Android device tries to use zram as swap and turns
> > off store_user in order to reduce the slub object size.
> > When calculate_sizes() is called from kmem_cache_open(), the size, order
> > and objects values for zs_handle are:
> >  size:360, order:0, objects:22
> > However, once the SLAB_STORE_USER bit is cleared in store_user_store():
> >  size: 56, order:1, objects:73
> >
> > The size, order and objects values are all changed by calculate_sizes(),
> > but the random_seq array still has its old size (22 entries). As a
> > result, an out-of-bounds array access can occur in shuffle_freelist()
> > when a slab allocation is requested.
> >
> > This patch fixes the problem by re-allocating the random_seq array
> > with the re-calculated objects value.
> >
> > Fixes: 210e7a43fa905 ("mm: SLUB freelist randomization")
> > Reported-by: Ari-Pekka Verta 
> > Reported-by: Timo Simola 
> > Signed-off-by: Sahara 
> > ---
> >  mm/slub.c | 23 ---
> >  1 file changed, 16 insertions(+), 7 deletions(-)
> >
> > diff --git a/mm/slub.c b/mm/slub.c
> > index f226d66408ee..be1e4d6682b8 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -3704,7 +3704,22 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
> >   if (oo_objects(s->oo) > oo_objects(s->max))
> >   s->max = s->oo;
> >
> > - return !!oo_objects(s->oo);
> > + if (!oo_objects(s->oo))
> > + return 0;
> > +
> > + /*
> > +  * Initialize the pre-computed randomized freelist if slab is up.
> > +  * If the randomized freelist random_seq is already initialized,
> > +  * free and re-initialize it with re-calculated value.
> > +  */
> > + if (slab_state >= UP) {
> > + if (s->random_seq)
> > + cache_random_seq_destroy(s);

kfree(NULL) is a noop, so you don't need to check s->random_seq.
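
Something like this, for example (untested sketch; it relies on
cache_random_seq_destroy() being safe to call with a NULL random_seq,
which it is today since it boils down to kfree()):

        if (slab_state >= UP) {
                /* kfree(NULL) inside is a no-op, so no need for the check. */
                cache_random_seq_destroy(s);
                if (init_cache_random_seq(s))
                        return 0;
        }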

> > + if (init_cache_random_seq(s))
> > + return 0;
> > + }
> > +
> > + return 1;
> >  }
> >
> >  static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
> > @@ -3748,12 +3763,6 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
> >   s->remote_node_defrag_ratio = 1000;
> >  #endif
> >
> > - /* Initialize the pre-computed randomized freelist if slab is up */
> > - if (slab_state >= UP) {
> > - if (init_cache_random_seq(s))
> > - goto error;
> > - }
> > -
> >   if (!init_kmem_cache_nodes(s))
> >   goto error;
> >
> > --
> > 2.17.1
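
To make the failure mode concrete, here is a tiny userspace model of the
out-of-bounds access described above (illustration only; the 22/73 counts
mirror the zs_handle example, everything else is made up for the demo):

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                unsigned int old_objects = 22;  /* length random_seq was sized for */
                unsigned int new_objects = 73;  /* object count after the flag change */
                unsigned int *random_seq = calloc(old_objects, sizeof(*random_seq));

                if (!random_seq)
                        return 1;

                /* A shuffle loop walking the new count indexes past the end. */
                for (unsigned int i = 0; i < new_objects; i++) {
                        if (i >= old_objects) {
                                printf("index %u out of bounds (%u entries)\n",
                                       i, old_objects);
                                break;
                        }
                        (void)random_seq[i];
                }

                free(random_seq);
                return 0;
        }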



--
Thomas


Re: [PATCH v9 04/11] x86/entry/64: Adapt assembly for PIE support

2019-08-05 Thread Thomas Garnier
On Mon, Aug 5, 2019 at 10:28 AM Borislav Petkov  wrote:
>
> On Tue, Jul 30, 2019 at 12:12:48PM -0700, Thomas Garnier wrote:
> > Change the assembly code to use only relative references of symbols for the
> > kernel to be PIE compatible.
> >
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range below 0xffffffff80000000.
> >
> > Signed-off-by: Thomas Garnier 
> > Reviewed-by: Kees Cook 
> > ---
> >  arch/x86/entry/entry_64.S | 16 +++-
> >  1 file changed, 11 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> > index 3f5a978a02a7..4b588a902009 100644
> > --- a/arch/x86/entry/entry_64.S
> > +++ b/arch/x86/entry/entry_64.S
> > @@ -1317,7 +1317,8 @@ ENTRY(error_entry)
> >   movl%ecx, %eax  /* zero extend */
> >   cmpq%rax, RIP+8(%rsp)
> >   je  .Lbstep_iret
> > - cmpq$.Lgs_change, RIP+8(%rsp)
> > + leaq.Lgs_change(%rip), %rcx
> > + cmpq%rcx, RIP+8(%rsp)
> >   jne .Lerror_entry_done
> >
> >   /*
> > @@ -1514,10 +1515,10 @@ ENTRY(nmi)
> >* resume the outer NMI.
> >*/
> >
> > - movq$repeat_nmi, %rdx
> > + leaqrepeat_nmi(%rip), %rdx
> >   cmpq8(%rsp), %rdx
> >   ja  1f
> > - movq$end_repeat_nmi, %rdx
> > + leaqend_repeat_nmi(%rip), %rdx
> >   cmpq8(%rsp), %rdx
> >   ja  nested_nmi_out
> >  1:
> > @@ -1571,7 +1572,8 @@ nested_nmi:
> >   pushq   %rdx
> >   pushfq
> >   pushq   $__KERNEL_CS
> > - pushq   $repeat_nmi
> > + leaqrepeat_nmi(%rip), %rdx
> > + pushq   %rdx
> >
> >   /* Put stack back */
> >   addq$(6*8), %rsp
> > @@ -1610,7 +1612,11 @@ first_nmi:
> >   addq$8, (%rsp)  /* Fix up RSP */
> >   pushfq  /* RFLAGS */
> >   pushq   $__KERNEL_CS/* CS */
> > - pushq   $1f /* RIP */
> > + pushq   $0  /* Future return address */
> > + pushq   %rax/* Save RAX */
> > + leaq1f(%rip), %rax  /* RIP */
> > + movq%rax, 8(%rsp)   /* Put 1f on return address */
> > + popq%rax/* Restore RAX */
>
> Can't you just use a callee-clobbered reg here instead of preserving
> %rax?

I saw that %rdx was used as a temporary and restored before the
end, so I assumed it was not an option.

>
> --
> Regards/Gruss,
> Boris.
>
> Good mailing practices for 400: avoid top-posting and trim the reply.


[PATCH v9 07/11] x86/acpi: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ---
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index b0715c3ac18d..3ec6c1b74ad4 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -15,7 +15,7 @@
 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 */
 ENTRY(wakeup_long64)
-   movqsaved_magic, %rax
+   movqsaved_magic(%rip), %rax
movq$0x123456789abcdef0, %rdx
cmpq%rdx, %rax
jne bogus_64_magic
@@ -26,14 +26,14 @@ ENTRY(wakeup_long64)
movw%ax, %es
movw%ax, %fs
movw%ax, %gs
-   movqsaved_rsp, %rsp
+   movqsaved_rsp(%rip), %rsp
 
-   movqsaved_rbx, %rbx
-   movqsaved_rdi, %rdi
-   movqsaved_rsi, %rsi
-   movqsaved_rbp, %rbp
+   movqsaved_rbx(%rip), %rbx
+   movqsaved_rdi(%rip), %rdi
+   movqsaved_rsi(%rip), %rsi
+   movqsaved_rbp(%rip), %rbp
 
-   movqsaved_rip, %rax
+   movqsaved_rip(%rip), %rax
jmp *%rax
 ENDPROC(wakeup_long64)
 
@@ -46,7 +46,7 @@ ENTRY(do_suspend_lowlevel)
xorl%eax, %eax
callsave_processor_state
 
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -65,13 +65,14 @@ ENTRY(do_suspend_lowlevel)
pushfq
popqpt_regs_flags(%rax)
 
-   movq$.Lresume_point, saved_rip(%rip)
+   leaq.Lresume_point(%rip), %rax
+   movq%rax, saved_rip(%rip)
 
-   movq%rsp, saved_rsp
-   movq%rbp, saved_rbp
-   movq%rbx, saved_rbx
-   movq%rdi, saved_rdi
-   movq%rsi, saved_rsi
+   movq%rsp, saved_rsp(%rip)
+   movq%rbp, saved_rbp(%rip)
+   movq%rbx, saved_rbx(%rip)
+   movq%rdi, saved_rdi(%rip)
+   movq%rsi, saved_rsi(%rip)
 
addq$8, %rsp
movl$3, %edi
@@ -83,7 +84,7 @@ ENTRY(do_suspend_lowlevel)
.align 4
 .Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqsaved_context_cr4(%rax), %rbx
movq%rbx, %cr4
movqsaved_context_cr3(%rax), %rbx
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 05/11] x86: pm-trace - Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change assembly to use the new _ASM_MOVABS macro instead of _ASM_MOV for
the assembly to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/include/asm/pm-trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pm-trace.h b/arch/x86/include/asm/pm-trace.h
index bfa32aa428e5..972070806ce9 100644
--- a/arch/x86/include/asm/pm-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -8,7 +8,7 @@
 do {   \
if (pm_trace_enabled) { \
const void *tracedata;  \
-   asm volatile(_ASM_MOV " $1f,%0\n"   \
+   asm volatile(_ASM_MOVABS " $1f,%0\n"\
 ".section .tracedata,\"a\"\n"  \
 "1:\t.word %c1\n\t"\
 _ASM_PTR " %c2\n"  \
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 08/11] x86/boot/64: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Early at boot, the kernel is mapped at a temporary address while preparing
the page table. To know the changes needed for the page table with KASLR,
the boot code calculates the difference between the expected address of the
kernel and the one chosen by KASLR. This does not work with PIE because all
symbol references in the code are relative: instead of the future relocated
virtual address, you would get the current temporary mapping.
These instructions were changed to use absolute 64-bit references.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/head_64.S | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index f3d3e9646a99..9a3f96566eb2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -88,8 +88,10 @@ startup_64:
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(early_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
jmp 1f
+
 ENTRY(secondary_startup_64)
UNWIND_HINT_EMPTY
/*
@@ -118,7 +120,8 @@ ENTRY(secondary_startup_64)
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(init_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(init_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
 1:
 
/* Enable PAE mode, PGE and LA57 */
@@ -136,7 +139,7 @@ ENTRY(secondary_startup_64)
movq%rax, %cr3
 
/* Ensure I am executing from virtual addresses */
-   movq$1f, %rax
+   movabs  $1f, %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
 1:
@@ -233,11 +236,12 @@ ENTRY(secondary_startup_64)
 *  REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 *  address given in m16:64.
 */
-   pushq   $.Lafter_lret   # put return address on stack for unwinder
+   movabs  $.Lafter_lret, %rax
+   pushq   %rax# put return address on stack for unwinder
xorl%ebp, %ebp  # clear frame pointer
-   movqinitial_code(%rip), %rax
+   leaqinitial_code(%rip), %rax
pushq   $__KERNEL_CS# set correct cs
-   pushq   %rax# target address in negative space
+   pushq   (%rax)  # target address in negative space
lretq
 .Lafter_lret:
 END(secondary_startup_64)
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 06/11] x86/CPU: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/processor.h | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..bf333d62889e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -713,11 +713,13 @@ static inline void sync_core(void)
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
-   "pushq $1f\n\t"
+   "leaq 1f(%%rip), %q0\n\t"
+   "pushq %q0\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
-   : "=" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+   : "=" (tmp), ASM_CALL_CONSTRAINT
+   : : "cc", "memory");
 #endif
 }
 
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 00/11] x86: PIE support to extend KASLR randomization

2019-07-30 Thread Thomas Garnier
Minor changes based on feedback and rebase from v8.

Splitting the previous series in two. This part contains the assembly code
changes required for PIE, but without any direct dependencies on the
rest of the patchset.

Changes:
 - patch v9 (assembly):
   - Moved to relative reference for sync_core based on feedback.
   - x86/crypto had multiple algorithms deleted, removed PIE changes to them.
   - fix typo on comment end line.
 - patch v8 (assembly):
   - Fix issues in crypto changes (thanks to Eric Biggers).
   - Remove unnecessary jump table change.
   - Change author and signoff to chromium email address.
 - patch v7 (assembly):
   - Split patchset and reorder changes.
 - patch v6:
   - Rebase on latest changes in jump tables and crypto.
   - Fix wording on couple commits.
   - Revisit checkpatch warnings.
   - Moving to @chromium.org.
 - patch v5:
   - Adapt new crypto modules for PIE.
   - Improve per-cpu commit message.
   - Fix xen 32-bit build error with .quad.
   - Remove extra code for ftrace.
 - patch v4:
   - Simplify early boot by removing global variables.
   - Modify the mcount location script for __mcount_loc instead of the address
 read in the ftrace implementation.
   - Edit commit description to explain better where the kernel can be located.
   - Streamlined the testing done on each patch proposal. Always testing
 hibernation, suspend, ftrace and kprobe to ensure no regressions.
 - patch v3:
   - Update on message to describe longer term PIE goal.
   - Minor change on ftrace if condition.
   - Changed code using xchgq.
 - patch v2:
   - Adapt patch to work post KPTI and compiler changes
   - Redo all performance testing with latest configs and compilers
   - Simplify mov macro on PIE (MOVABS now)
   - Reduce GOT footprint
 - patch v1:
   - Simplify ftrace implementation.
   - Use gcc -mstack-protector-guard-reg=%gs with PIE when possible.
 - rfc v3:
   - Use --emit-relocs instead of -pie to reduce dynamic relocation space on
 mapped memory. It also simplifies the relocation process.
   - Move the start the module section next to the kernel. Remove the need for
 -mcmodel=large on modules. Extends module space from 1 to 2G maximum.
   - Support for XEN PVH as 32-bit relocations can be ignored with
 --emit-relocs.
   - Support for GOT relocations previously done automatically with -pie.
   - Remove need for dynamic PLT in modules.
   - Support dynamic GOT for modules.
 - rfc v2:
   - Add support for a global stack cookie while the compiler defaults to fs
 without mcmodel=kernel
   - Change patch 7 to correctly jump out of the identity mapping on kexec load
 preserve.

These patches make some of the changes necessary to build the kernel as
Position Independent Executable (PIE) on x86_64. Another patchset will
add the PIE option and larger architecture changes.

The patches:
 - 1, 3-11: Change in assembly code to be PIE compliant.
 - 2: Add a new _ASM_MOVABS macro to fetch a symbol address generically.

diffstat:
 crypto/aegis128-aesni-asm.S |6 +-
 crypto/aesni-intel_asm.S|8 +--
 crypto/aesni-intel_avx-x86_64.S |3 -
 crypto/camellia-aesni-avx-asm_64.S  |   42 +++
 crypto/camellia-aesni-avx2-asm_64.S |   44 
 crypto/camellia-x86_64-asm_64.S |8 +--
 crypto/cast5-avx-x86_64-asm_64.S|   50 ++
 crypto/cast6-avx-x86_64-asm_64.S|   44 +---
 crypto/des3_ede-asm_64.S|   96 
 crypto/ghash-clmulni-intel_asm.S|4 -
 crypto/glue_helper-asm-avx.S|4 -
 crypto/glue_helper-asm-avx2.S   |6 +-
 crypto/sha256-avx2-asm.S|   18 --
 entry/entry_64.S|   16 --
 include/asm/alternative.h   |6 +-
 include/asm/asm.h   |1 
 include/asm/paravirt_types.h|   25 +++--
 include/asm/pm-trace.h  |2 
 include/asm/processor.h |6 +-
 kernel/acpi/wakeup_64.S |   31 ++-
 kernel/head_64.S|   16 +++---
 kernel/relocate_kernel_64.S |2 
 power/hibernate_asm_64.S|4 -
 23 files changed, 261 insertions(+), 181 deletions(-)

Patchset is based on next-20190729.




[PATCH v9 11/11] x86/alternatives: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly options to work with pointers instead of integers.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/alternative.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 094fbc9c0b1c..28a838106e5f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -243,7 +243,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* Like alternative_io, but for replacing a direct call with another one. */
 #define alternative_call(oldfunc, newfunc, feature, output, input...)  \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
-   : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+   : output : [old] "X" (oldfunc), [new] "X" (newfunc), ## input)
 
 /*
  * Like alternative_call, but there are two features and respective functions.
@@ -256,8 +256,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2)\
: output, ASM_CALL_CONSTRAINT \
-   : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
- [new2] "i" (newfunc2), ## input)
+   : [old] "X" (oldfunc), [new1] "X" (newfunc1), \
+ [new2] "X" (newfunc2), ## input)
 
 /*
  * use this macro(s) if you need more than one output parameter
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 02/11] x86: Add macro to get symbol address for PIE support

2019-07-30 Thread Thomas Garnier
Add a new _ASM_MOVABS macro to fetch a symbol address. It will be used
to replace "_ASM_MOV $, %dst" code construct that are not
compatible with PIE.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/asm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 3ff577c0b102..3a686057e882 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -30,6 +30,7 @@
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
 
 #define _ASM_MOV   __ASM_SIZE(mov)
+#define _ASM_MOVABS__ASM_SEL(movl, movabsq)
 #define _ASM_INC   __ASM_SIZE(inc)
 #define _ASM_DEC   __ASM_SIZE(dec)
 #define _ASM_ADD   __ASM_SIZE(add)
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 03/11] x86: relocate_kernel - Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only absolute references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/relocate_kernel_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index c51ccff5cd01..c72889b09840 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -206,7 +206,7 @@ identity_mapped:
movq%rax, %cr3
lea PAGE_SIZE(%r8), %rsp
callswap_pages
-   movq$virtual_mapped, %rax
+   movabsq $virtual_mapped, %rax
pushq   %rax
ret
 
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 09/11] x86/power/64: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
Reviewed-by: Kees Cook 
---
 arch/x86/power/hibernate_asm_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0a7ece..796cd19d575b 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -23,7 +23,7 @@
 #include 
 
 ENTRY(swsusp_arch_suspend)
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -114,7 +114,7 @@ ENTRY(restore_registers)
movq%rax, %cr4;  # turn PGE back on
 
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqpt_regs_sp(%rax), %rsp
movqpt_regs_bp(%rax), %rbp
movqpt_regs_si(%rax), %rsi
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 10/11] x86/paravirt: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
If PIE is enabled, switch the paravirt assembly constraints to be
compatible. The %c/i constraints generate smaller code, so they are kept
by default.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Juergen Gross 
---
 arch/x86/include/asm/paravirt_types.h | 25 +
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 70b654f3ffe5..fd7dc37d0010 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -338,9 +338,25 @@ extern struct paravirt_patch_template pv_ops;
 #define PARAVIRT_PATCH(x)  \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
+#ifdef CONFIG_X86_PIE
+#define paravirt_opptr_call "a"
+#define paravirt_opptr_type "p"
+
+/*
+ * Alternative patching requires a maximum of 7 bytes but the relative call is
+ * only 6 bytes. If PIE is enabled, add an additional nop to the call
+ * instruction to ensure patching is possible.
+ */
+#define PARAVIRT_CALL_POST  "nop;"
+#else
+#define paravirt_opptr_call "c"
+#define paravirt_opptr_type "i"
+#define PARAVIRT_CALL_POST  ""
+#endif
+
 #define paravirt_type(op)  \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),\
-   [paravirt_opptr] "i" (&(pv_ops.op))
+   [paravirt_opptr] paravirt_opptr_type (&(pv_ops.op))
 #define paravirt_clobber(clobber)  \
[paravirt_clobber] "i" (clobber)
 
@@ -379,9 +395,10 @@ int paravirt_disable_iospace(void);
  * offset into the paravirt_patch_template structure, and can therefore be
  * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL  \
-   ANNOTATE_RETPOLINE_SAFE \
-   "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL  \
+   ANNOTATE_RETPOLINE_SAFE \
+   "call *%" paravirt_opptr_call "[paravirt_opptr];"   \
+   PARAVIRT_CALL_POST
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
-- 
2.22.0.770.g0f2c4a37fd-goog



[PATCH v9 04/11] x86/entry/64: Adapt assembly for PIE support

2019-07-30 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/entry/entry_64.S | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3f5a978a02a7..4b588a902009 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1317,7 +1317,8 @@ ENTRY(error_entry)
movl%ecx, %eax  /* zero extend */
cmpq%rax, RIP+8(%rsp)
je  .Lbstep_iret
-   cmpq$.Lgs_change, RIP+8(%rsp)
+   leaq.Lgs_change(%rip), %rcx
+   cmpq%rcx, RIP+8(%rsp)
jne .Lerror_entry_done
 
/*
@@ -1514,10 +1515,10 @@ ENTRY(nmi)
 * resume the outer NMI.
 */
 
-   movq$repeat_nmi, %rdx
+   leaqrepeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  1f
-   movq$end_repeat_nmi, %rdx
+   leaqend_repeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  nested_nmi_out
 1:
@@ -1571,7 +1572,8 @@ nested_nmi:
pushq   %rdx
pushfq
pushq   $__KERNEL_CS
-   pushq   $repeat_nmi
+   leaqrepeat_nmi(%rip), %rdx
+   pushq   %rdx
 
/* Put stack back */
addq$(6*8), %rsp
@@ -1610,7 +1612,11 @@ first_nmi:
addq$8, (%rsp)  /* Fix up RSP */
pushfq  /* RFLAGS */
pushq   $__KERNEL_CS/* CS */
-   pushq   $1f /* RIP */
+   pushq   $0  /* Future return address */
+   pushq   %rax/* Save RAX */
+   leaq1f(%rip), %rax  /* RIP */
+   movq%rax, 8(%rsp)   /* Put 1f on return address */
+   popq%rax/* Restore RAX */
iretq   /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
 1:
-- 
2.22.0.770.g0f2c4a37fd-goog



Re: [PATCH v8 06/11] x86/CPU: Adapt assembly for PIE support

2019-07-09 Thread Thomas Garnier
On Tue, Jul 9, 2019 at 11:39 AM Alexey Dobriyan  wrote:
>
> On Mon, Jul 08, 2019 at 12:35:13PM -0700, Thomas Garnier wrote:
> > On Mon, Jul 8, 2019 at 12:09 PM Alexey Dobriyan  wrote:
> > >
> > > Thomas Garnier wrote:
> > > > - "pushq $1f\n\t"
> > > > + "movabsq $1f, %q0\n\t"
> > > > + "pushq %q0\n\t"
> > > >   "iretq\n\t"
> > > >   UNWIND_HINT_RESTORE
> > > >   "1:"
> > >
> > > Fake PIE. True PIE looks like this:
> >
> > I used movabsq in a couple of assembly changes where the memory context
> > is unclear and a relative reference might lead to issues. It happened on
> > the early boot and hibernation save/restore paths. Do you think a relative
> > reference in this function will always be accurate?
>
> As long as iretq target is not too far it should be OK.
>
> I'm not really sure which issues can pop up.
>
> IRETQ is 64-bit only, RIP-relative addressing is 64-bit only.
> Assembler (hopefully) will error compilation if target is too far.
>
> And it is shorter than movabsq.

Agree, I will change it and run some tests for the next iteration.

>
> > > 81022d70 :
> > > 81022d70:   8c d0   moveax,ss
> > > 81022d72:   50  push   rax
> > > 81022d73:   54  push   rsp
> > > 81022d74:   48 83 04 24 08  addQWORD PTR [rsp],0x8
> > > 81022d79:   9c  pushf
> > > 81022d7a:   8c c8   moveax,cs
> > > 81022d7c:   50  push   rax
> > > 81022d7d:  ===> 48 8d 05 03 00 00 00learax,[rip+0x3]  
> > >   # 81022d87 
> > > 81022d84:   50  push   rax
> > > 81022d85:   48 cf   iretq
> > > 81022d87:   c3  ret
> > >
> > > Signed-off-by: Alexey Dobriyan 
> > >
> > > --- a/arch/x86/include/asm/processor.h
> > > +++ b/arch/x86/include/asm/processor.h
> > > @@ -710,7 +710,8 @@ static inline void sync_core(void)
> > > "pushfq\n\t"
> > > "mov %%cs, %0\n\t"
> > > "pushq %q0\n\t"
> > > -   "pushq $1f\n\t"
> > > +   "leaq 1f(%%rip), %q0\n\t"
> > > +   "pushq %q0\n\t"
> > > "iretq\n\t"
> > > UNWIND_HINT_RESTORE
> > > "1:"


Re: [PATCH v8 06/11] x86/CPU: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
On Mon, Jul 8, 2019 at 12:09 PM Alexey Dobriyan  wrote:
>
> Thomas Garnier wrote:
> > - "pushq $1f\n\t"
> > + "movabsq $1f, %q0\n\t"
> > + "pushq %q0\n\t"
> >   "iretq\n\t"
> >   UNWIND_HINT_RESTORE
> >   "1:"
>
> Fake PIE. True PIE looks like this:

I used movabsq in a couple of assembly changes where the memory context
is unclear and a relative reference might lead to issues. It happened on
the early boot and hibernation save/restore paths. Do you think a relative
reference in this function will always be accurate?

>
> 81022d70 :
> 81022d70:   8c d0   moveax,ss
> 81022d72:   50  push   rax
> 81022d73:   54  push   rsp
> 81022d74:   48 83 04 24 08  addQWORD PTR [rsp],0x8
> 81022d79:   9c  pushf
> 81022d7a:   8c c8   moveax,cs
> 81022d7c:   50  push   rax
> 81022d7d:  ===> 48 8d 05 03 00 00 00learax,[rip+0x3]# 
> 81022d87 
> 81022d84:   50  push   rax
> 81022d85:   48 cf   iretq
> 81022d87:   c3  ret
>
> Signed-off-by: Alexey Dobriyan 
>
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -710,7 +710,8 @@ static inline void sync_core(void)
> "pushfq\n\t"
> "mov %%cs, %0\n\t"
> "pushq %q0\n\t"
> -   "pushq $1f\n\t"
> +   "leaq 1f(%%rip), %q0\n\t"
> +   "pushq %q0\n\t"
> "iretq\n\t"
> UNWIND_HINT_RESTORE
> "1:"


[PATCH v8 03/11] x86: relocate_kernel - Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only absolute references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/relocate_kernel_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index c51ccff5cd01..c72889b09840 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -206,7 +206,7 @@ identity_mapped:
movq%rax, %cr3
lea PAGE_SIZE(%r8), %rsp
callswap_pages
-   movq$virtual_mapped, %rax
+   movabsq $virtual_mapped, %rax
pushq   %rax
ret
 
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 08/11] x86/boot/64: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Early at boot, the kernel is mapped at a temporary address while preparing
the page table. To know the changes needed for the page table with KASLR,
the boot code calculates the difference between the expected address of the
kernel and the one chosen by KASLR. This does not work with PIE because all
symbol references in the code are relative: instead of the future relocated
virtual address, you would get the current temporary mapping.
These instructions were changed to use absolute 64-bit references.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/head_64.S | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index bcd206c8ac90..64a4f0a22b20 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -90,8 +90,10 @@ startup_64:
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(early_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
jmp 1f
+
 ENTRY(secondary_startup_64)
UNWIND_HINT_EMPTY
/*
@@ -120,7 +122,8 @@ ENTRY(secondary_startup_64)
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(init_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(init_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
 1:
 
/* Enable PAE mode, PGE and LA57 */
@@ -138,7 +141,7 @@ ENTRY(secondary_startup_64)
movq%rax, %cr3
 
/* Ensure I am executing from virtual addresses */
-   movq$1f, %rax
+   movabs  $1f, %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
 1:
@@ -235,11 +238,12 @@ ENTRY(secondary_startup_64)
 *  REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 *  address given in m16:64.
 */
-   pushq   $.Lafter_lret   # put return address on stack for unwinder
+   movabs  $.Lafter_lret, %rax
+   pushq   %rax# put return address on stack for unwinder
xorl%ebp, %ebp  # clear frame pointer
-   movqinitial_code(%rip), %rax
+   leaqinitial_code(%rip), %rax
pushq   $__KERNEL_CS# set correct cs
-   pushq   %rax# target address in negative space
+   pushq   (%rax)  # target address in negative space
lretq
 .Lafter_lret:
 END(secondary_startup_64)
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 04/11] x86/entry/64: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/entry/entry_64.S | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 0ea4831a72a4..9f55a359c896 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1309,7 +1309,8 @@ ENTRY(error_entry)
movl%ecx, %eax  /* zero extend */
cmpq%rax, RIP+8(%rsp)
je  .Lbstep_iret
-   cmpq$.Lgs_change, RIP+8(%rsp)
+   leaq.Lgs_change(%rip), %rcx
+   cmpq%rcx, RIP+8(%rsp)
jne .Lerror_entry_done
 
/*
@@ -1506,10 +1507,10 @@ ENTRY(nmi)
 * resume the outer NMI.
 */
 
-   movq$repeat_nmi, %rdx
+   leaqrepeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  1f
-   movq$end_repeat_nmi, %rdx
+   leaqend_repeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  nested_nmi_out
 1:
@@ -1563,7 +1564,8 @@ nested_nmi:
pushq   %rdx
pushfq
pushq   $__KERNEL_CS
-   pushq   $repeat_nmi
+   leaqrepeat_nmi(%rip), %rdx
+   pushq   %rdx
 
/* Put stack back */
addq$(6*8), %rsp
@@ -1602,7 +1604,11 @@ first_nmi:
addq$8, (%rsp)  /* Fix up RSP */
pushfq  /* RFLAGS */
pushq   $__KERNEL_CS/* CS */
-   pushq   $1f /* RIP */
+   pushq   $0  /* Future return address */
+   pushq   %rax/* Save RAX */
+   leaq1f(%rip), %rax  /* RIP */
+   movq%rax, 8(%rsp)   /* Put 1f on return address */
+   popq%rax/* Restore RAX */
iretq   /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
 1:
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 05/11] x86: pm-trace - Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change assembly to use the new _ASM_MOVABS macro instead of _ASM_MOV for
the assembly to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Kees Cook 
---
 arch/x86/include/asm/pm-trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pm-trace.h b/arch/x86/include/asm/pm-trace.h
index bfa32aa428e5..972070806ce9 100644
--- a/arch/x86/include/asm/pm-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -8,7 +8,7 @@
 do {   \
if (pm_trace_enabled) { \
const void *tracedata;  \
-   asm volatile(_ASM_MOV " $1f,%0\n"   \
+   asm volatile(_ASM_MOVABS " $1f,%0\n"\
 ".section .tracedata,\"a\"\n"  \
 "1:\t.word %c1\n\t"\
 _ASM_PTR " %c2\n"  \
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 09/11] x86/power/64: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
Reviewed-by: Kees Cook 
---
 arch/x86/power/hibernate_asm_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0a7ece..796cd19d575b 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -23,7 +23,7 @@
 #include 
 
 ENTRY(swsusp_arch_suspend)
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -114,7 +114,7 @@ ENTRY(restore_registers)
movq%rax, %cr4;  # turn PGE back on
 
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqpt_regs_sp(%rax), %rsp
movqpt_regs_bp(%rax), %rbp
movqpt_regs_si(%rax), %rsi
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 02/11] x86: Add macro to get symbol address for PIE support

2019-07-08 Thread Thomas Garnier
Add a new _ASM_MOVABS macro to fetch a symbol address. It will be used
to replace "_ASM_MOV $, %dst" code construct that are not
compatible with PIE.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/asm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 3ff577c0b102..3a686057e882 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -30,6 +30,7 @@
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
 
 #define _ASM_MOV   __ASM_SIZE(mov)
+#define _ASM_MOVABS__ASM_SEL(movl, movabsq)
 #define _ASM_INC   __ASM_SIZE(inc)
 #define _ASM_DEC   __ASM_SIZE(dec)
 #define _ASM_ADD   __ASM_SIZE(add)
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 07/11] x86/acpi: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
Reviewed-by: Kees Cook 
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ---
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index b0715c3ac18d..3ec6c1b74ad4 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -15,7 +15,7 @@
 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 */
 ENTRY(wakeup_long64)
-   movqsaved_magic, %rax
+   movqsaved_magic(%rip), %rax
movq$0x123456789abcdef0, %rdx
cmpq%rdx, %rax
jne bogus_64_magic
@@ -26,14 +26,14 @@ ENTRY(wakeup_long64)
movw%ax, %es
movw%ax, %fs
movw%ax, %gs
-   movqsaved_rsp, %rsp
+   movqsaved_rsp(%rip), %rsp
 
-   movqsaved_rbx, %rbx
-   movqsaved_rdi, %rdi
-   movqsaved_rsi, %rsi
-   movqsaved_rbp, %rbp
+   movqsaved_rbx(%rip), %rbx
+   movqsaved_rdi(%rip), %rdi
+   movqsaved_rsi(%rip), %rsi
+   movqsaved_rbp(%rip), %rbp
 
-   movqsaved_rip, %rax
+   movqsaved_rip(%rip), %rax
jmp *%rax
 ENDPROC(wakeup_long64)
 
@@ -46,7 +46,7 @@ ENTRY(do_suspend_lowlevel)
xorl%eax, %eax
callsave_processor_state
 
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -65,13 +65,14 @@ ENTRY(do_suspend_lowlevel)
pushfq
popqpt_regs_flags(%rax)
 
-   movq$.Lresume_point, saved_rip(%rip)
+   leaq.Lresume_point(%rip), %rax
+   movq%rax, saved_rip(%rip)
 
-   movq%rsp, saved_rsp
-   movq%rbp, saved_rbp
-   movq%rbx, saved_rbx
-   movq%rdi, saved_rdi
-   movq%rsi, saved_rsi
+   movq%rsp, saved_rsp(%rip)
+   movq%rbp, saved_rbp(%rip)
+   movq%rbx, saved_rbx(%rip)
+   movq%rdi, saved_rdi(%rip)
+   movq%rsi, saved_rsi(%rip)
 
addq$8, %rsp
movl$3, %edi
@@ -83,7 +84,7 @@ ENTRY(do_suspend_lowlevel)
.align 4
 .Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqsaved_context_cr4(%rax), %rbx
movq%rbx, %cr4
movqsaved_context_cr3(%rax), %rbx
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 10/11] x86/paravirt: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
If PIE is enabled, switch the paravirt assembly constraints to be
compatible. The %c/i constraints generate smaller code, so they are kept
by default.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
Acked-by: Juergen Gross 
---
 arch/x86/include/asm/paravirt_types.h | 25 +
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 946f8f1f1efc..5ec59abc5cb5 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -343,9 +343,25 @@ extern struct paravirt_patch_template pv_ops;
 #define PARAVIRT_PATCH(x)  \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
+#ifdef CONFIG_X86_PIE
+#define paravirt_opptr_call "a"
+#define paravirt_opptr_type "p"
+
+/*
+ * Alternative patching requires a maximum of 7 bytes but the relative call is
+ * only 6 bytes. If PIE is enabled, add an additional nop to the call
+ * instruction to ensure patching is possible.
+ * */
+#define PARAVIRT_CALL_POST  "nop;"
+#else
+#define paravirt_opptr_call "c"
+#define paravirt_opptr_type "i"
+#define PARAVIRT_CALL_POST  ""
+#endif
+
 #define paravirt_type(op)  \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),\
-   [paravirt_opptr] "i" (&(pv_ops.op))
+   [paravirt_opptr] paravirt_opptr_type (&(pv_ops.op))
 #define paravirt_clobber(clobber)  \
[paravirt_clobber] "i" (clobber)
 
@@ -384,9 +400,10 @@ int paravirt_disable_iospace(void);
  * offset into the paravirt_patch_template structure, and can therefore be
  * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL  \
-   ANNOTATE_RETPOLINE_SAFE \
-   "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL  \
+   ANNOTATE_RETPOLINE_SAFE \
+   "call *%" paravirt_opptr_call "[paravirt_opptr];"   \
+   PARAVIRT_CALL_POST
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 06/11] x86/CPU: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible. Use the new _ASM_MOVABS macro instead of
the 'mov $symbol, %dst' construct.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/processor.h | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3eab6ece52b4..3e2154b0e09f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -713,11 +713,13 @@ static inline void sync_core(void)
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
-   "pushq $1f\n\t"
+   "movabsq $1f, %q0\n\t"
+   "pushq %q0\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
-   : "=" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+   : "=" (tmp), ASM_CALL_CONSTRAINT
+   : : "cc", "memory");
 #endif
 }
 
-- 
2.22.0.410.gd8fdbe21b5-goog



[PATCH v8 11/11] x86/alternatives: Adapt assembly for PIE support

2019-07-08 Thread Thomas Garnier
Change the assembly options to work with pointers instead of integers.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/alternative.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 094fbc9c0b1c..28a838106e5f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -243,7 +243,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* Like alternative_io, but for replacing a direct call with another one. */
 #define alternative_call(oldfunc, newfunc, feature, output, input...)  \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
-   : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+   : output : [old] "X" (oldfunc), [new] "X" (newfunc), ## input)
 
 /*
  * Like alternative_call, but there are two features and respective functions.
@@ -256,8 +256,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2)\
: output, ASM_CALL_CONSTRAINT \
-   : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
- [new2] "i" (newfunc2), ## input)
+   : [old] "X" (oldfunc), [new1] "X" (newfunc1), \
+ [new2] "X" (newfunc2), ## input)
 
 /*
  * use this macro(s) if you need more than one output parameter
-- 
2.22.0.410.gd8fdbe21b5-goog



Re: [PATCH v7 11/12] x86/paravirt: Adapt assembly for PIE support

2019-05-29 Thread Thomas Garnier
On Sun, May 26, 2019 at 10:47 PM Juergen Gross  wrote:
>
> On 21/05/2019 01:19, Thomas Garnier wrote:
> > From: Thomas Garnier 
> >
> > If PIE is enabled, switch the paravirt assembly constraints to be
> > compatible. The %c/i constraints generate smaller code, so they are kept
> > by default.
> >
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range below 0xffffffff80000000.
> >
> > Signed-off-by: Thomas Garnier 
>
> Acked-by: Juergen Gross 

Thanks Juergen.

>
>
> Juergen


Re: [PATCH v7 01/12] x86/crypto: Adapt assembly for PIE support

2019-05-29 Thread Thomas Garnier
On Wed, May 22, 2019 at 1:55 PM Eric Biggers  wrote:
>
> On Wed, May 22, 2019 at 01:47:07PM -0700, Thomas Garnier wrote:
> > On Mon, May 20, 2019 at 9:06 PM Eric Biggers  wrote:
> > >
> > > On Mon, May 20, 2019 at 04:19:26PM -0700, Thomas Garnier wrote:
> > > > diff --git a/arch/x86/crypto/sha256-avx2-asm.S 
> > > > b/arch/x86/crypto/sha256-avx2-asm.S
> > > > index 1420db15dcdd..2ced4b2f6c76 100644
> > > > --- a/arch/x86/crypto/sha256-avx2-asm.S
> > > > +++ b/arch/x86/crypto/sha256-avx2-asm.S
> > > > @@ -588,37 +588,42 @@ last_block_enter:
> > > >   mov INP, _INP(%rsp)
> > > >
> > > >   ## schedule 48 input dwords, by doing 3 rounds of 12 each
> > > > - xor SRND, SRND
> > > > + leaqK256(%rip), SRND
> > > > + ## loop1 upper bound
> > > > + leaqK256+3*4*32(%rip), INP
> > > >
> > > >  .align 16
> > > >  loop1:
> > > > - vpaddd  K256+0*32(SRND), X0, XFER
> > > > + vpaddd  0*32(SRND), X0, XFER
> > > >   vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
> > > >   FOUR_ROUNDS_AND_SCHED   _XFER + 0*32
> > > >
> > > > - vpaddd  K256+1*32(SRND), X0, XFER
> > > > + vpaddd  1*32(SRND), X0, XFER
> > > >   vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
> > > >   FOUR_ROUNDS_AND_SCHED   _XFER + 1*32
> > > >
> > > > - vpaddd  K256+2*32(SRND), X0, XFER
> > > > + vpaddd  2*32(SRND), X0, XFER
> > > >   vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
> > > >   FOUR_ROUNDS_AND_SCHED   _XFER + 2*32
> > > >
> > > > - vpaddd  K256+3*32(SRND), X0, XFER
> > > > + vpaddd  3*32(SRND), X0, XFER
> > > >   vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
> > > >   FOUR_ROUNDS_AND_SCHED   _XFER + 3*32
> > > >
> > > >   add $4*32, SRND
> > > > - cmp $3*4*32, SRND
> > > > + cmp INP, SRND
> > > >   jb  loop1
> > > >
> > > > + ## loop2 upper bound
> > > > + leaqK256+4*4*32(%rip), INP
> > > > +
> > > >  loop2:
> > > >   ## Do last 16 rounds with no scheduling
> > > > - vpaddd  K256+0*32(SRND), X0, XFER
> > > > + vpaddd  0*32(SRND), X0, XFER
> > > >   vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
> > > >   DO_4ROUNDS  _XFER + 0*32
> > > >
> > > > - vpaddd  K256+1*32(SRND), X1, XFER
> > > > + vpaddd  1*32(SRND), X1, XFER
> > > >   vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
> > > >   DO_4ROUNDS  _XFER + 1*32
> > > >   add $2*32, SRND
> > > > @@ -626,7 +631,7 @@ loop2:
> > > >   vmovdqa X2, X0
> > > >   vmovdqa X3, X1
> > > >
> > > > - cmp $4*4*32, SRND
> > > > + cmp INP, SRND
> > > >   jb  loop2
> > > >
> > > >   mov _CTX(%rsp), CTX
> > >
> > > There is a crash in sha256-avx2-asm.S with this patch applied.  Looks 
> > > like the
> > > %rsi register is being used for two different things at the same time: 
> > > 'INP' and
> > > 'y3'?  You should be able to reproduce by booting a kernel configured 
> > > with:
> > >
> > > CONFIG_CRYPTO_SHA256_SSSE3=y
> > > # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
> >
> > Thanks for testing the patch. I couldn't reproduce this crash, can you
> > share the whole .config or share any other specifics of your testing
> > setup?
> >
>
> I attached the .config I used.  It reproduces on v5.2-rc1 with just this patch
> applied.  The machine you're using does have AVX2 support, right?  If you're
> using QEMU, did you make sure to pass '-cpu host'?

Thanks for your help offline on this, Eric. I was able to repro the
issue and fix it; the fix will be part of the next iteration. You were
right that %esi was used later on. I simplified the code in this
context and ran more testing on all the CONFIG_CRYPTO_* options.

>
> - Eric


Re: [PATCH v7 03/12] x86: Add macro to get symbol address for PIE support

2019-05-22 Thread Thomas Garnier
On Mon, May 20, 2019 at 8:13 PM  wrote:
>
> On May 20, 2019 4:19:28 PM PDT, Thomas Garnier  wrote:
> >From: Thomas Garnier 
> >
> >Add a new _ASM_MOVABS macro to fetch a symbol address. It will be used
> >to replace "_ASM_MOV $, %dst" code construct that are not
> >compatible with PIE.
> >
> >Signed-off-by: Thomas Garnier 
> >---
> > arch/x86/include/asm/asm.h | 1 +
> > 1 file changed, 1 insertion(+)
> >
> >diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
> >index 3ff577c0b102..3a686057e882 100644
> >--- a/arch/x86/include/asm/asm.h
> >+++ b/arch/x86/include/asm/asm.h
> >@@ -30,6 +30,7 @@
> > #define _ASM_ALIGN__ASM_SEL(.balign 4, .balign 8)
> >
> > #define _ASM_MOV  __ASM_SIZE(mov)
> >+#define _ASM_MOVABS   __ASM_SEL(movl, movabsq)
> > #define _ASM_INC  __ASM_SIZE(inc)
> > #define _ASM_DEC  __ASM_SIZE(dec)
> > #define _ASM_ADD  __ASM_SIZE(add)
>
> This is just about *always* wrong on x86-64. We should be using leaq
> sym(%rip),%reg. If it isn't reachable by leaq, then it is a non-PIE symbol 
> like percpu. You do have to keep those distinct!

Yes, I agree. This patch just adds a shortcut for the cases where it is a
non-PIE symbol. The other patches try to separate the use cases where
a leaq sym(%rip) would work from the ones that need a movabsq. There are
multiple cases where relative references are not possible because the
memory layout is different (hibernation, early boot and others).
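
As a concrete userspace illustration of the two forms (not kernel code; it
assumes an x86-64 build linked with gcc -no-pie so the absolute relocation
resolves statically, and 'marker' is just a made-up local symbol):

        #include <stdio.h>

        void marker(void) { }   /* the symbol whose address we take */

        int main(void)
        {
                unsigned long rip_rel, abs64;

                /* RIP-relative: PIE-friendly, follows wherever the code is
                 * currently mapped, but the target must be within +/-2GB. */
                asm ("leaq marker(%%rip), %0" : "=r" (rip_rel));

                /* 64-bit absolute immediate: fixed up by relocations, usable
                 * when the symbol is not a PIE symbol or the mapping differs. */
                asm ("movabsq $marker, %0" : "=r" (abs64));

                printf("rip-relative: %#lx  movabs: %#lx\n", rip_rel, abs64);
                return 0;
        }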

> --
> Sent from my Android device with K-9 Mail. Please excuse my brevity.


Re: [PATCH v7 02/12] x86: Use symbol name in jump table for PIE support

2019-05-20 Thread Thomas Garnier
On Mon, May 20, 2019 at 4:20 PM Thomas Garnier  wrote:
>
> From: Thomas Garnier 
>
> Replace the %c constraint with %P. The %c is incompatible with PIE
> because it implies an immediate value whereas %P references a symbol.
> Change the _ASM_PTR reference to .long for expected relocation size and
> add a long padding to ensure entry alignment.
>
> Position Independent Executable (PIE) support will allow to extend the
> KASLR randomization range below 0xffffffff80000000.
>
> Signed-off-by: Thomas Garnier 
> ---
>  arch/x86/include/asm/jump_label.h | 8 
>  1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
> index 65191ce8e1cf..e47fad8ee632 100644
> --- a/arch/x86/include/asm/jump_label.h
> +++ b/arch/x86/include/asm/jump_label.h
> @@ -25,9 +25,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
> ".pushsection __jump_table,  \"aw\" \n\t"
> _ASM_ALIGN "\n\t"
> ".long 1b - ., %l[l_yes] - . \n\t"
> -   _ASM_PTR "%c0 + %c1 - .\n\t"
> +   _ASM_PTR "%P0 - .\n\t"
> ".popsection \n\t"
> -   : :  "i" (key), "i" (branch) : : l_yes);
> +   : :  "X" (&((char *)key)[branch]) : : l_yes);
>
> return false;
>  l_yes:
> @@ -42,9 +42,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
> ".pushsection __jump_table,  \"aw\" \n\t"
> _ASM_ALIGN "\n\t"
> ".long 1b - ., %l[l_yes] - . \n\t"
> -   _ASM_PTR "%c0 + %c1 - .\n\t"
> +   _ASM_PTR "%P0 - .\n\t"
> ".popsection \n\t"
> -   : :  "i" (key), "i" (branch) : : l_yes);
> +   : : "X" (&((char *)key)[branch]) : : l_yes);
>
> return false;
>  l_yes:
> --
> 2.21.0.1020.gf2820cf01a-goog
>

Realized I forgot to address a piece of feedback from the previous
iteration on this specific patch. Ignore it; I will check whether it can
be removed in the next iteration.


-- 
Thomas


[PATCH v7 11/12] x86/paravirt: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

If PIE is enabled, switch the paravirt assembly constraints to be
compatible. The %c/i constraints generate smaller code, so they are kept
by default.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/paravirt_types.h | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 2474e434a6f7..93be18bdb63e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -343,9 +343,17 @@ extern struct paravirt_patch_template pv_ops;
 #define PARAVIRT_PATCH(x)  \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
+#ifdef CONFIG_X86_PIE
+#define paravirt_opptr_call "a"
+#define paravirt_opptr_type "p"
+#else
+#define paravirt_opptr_call "c"
+#define paravirt_opptr_type "i"
+#endif
+
 #define paravirt_type(op)  \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),\
-   [paravirt_opptr] "i" (&(pv_ops.op))
+   [paravirt_opptr] paravirt_opptr_type (&(pv_ops.op))
 #define paravirt_clobber(clobber)  \
[paravirt_clobber] "i" (clobber)
 
@@ -393,7 +401,7 @@ int paravirt_disable_iospace(void);
  */
 #define PARAVIRT_CALL  \
ANNOTATE_RETPOLINE_SAFE \
-   "call *%c[paravirt_opptr];"
+   "call *%" paravirt_opptr_call "[paravirt_opptr];"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 07/12] x86/CPU: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible. Use the new _ASM_MOVABS macro instead of
the 'mov $symbol, %dst' construct.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/processor.h | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c34a35c78618..5490a6ead17c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -710,11 +710,13 @@ static inline void sync_core(void)
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
-   "pushq $1f\n\t"
+   "movabsq $1f, %q0\n\t"
+   "pushq %q0\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
-   : "=" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+   : "=" (tmp), ASM_CALL_CONSTRAINT
+   : : "cc", "memory");
 #endif
 }
 
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 12/12] x86/alternatives: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly options to work with pointers instead of integers.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/alternative.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 094fbc9c0b1c..28a838106e5f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -243,7 +243,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* Like alternative_io, but for replacing a direct call with another one. */
 #define alternative_call(oldfunc, newfunc, feature, output, input...)  \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
-   : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+   : output : [old] "X" (oldfunc), [new] "X" (newfunc), ## input)
 
 /*
  * Like alternative_call, but there are two features and respective functions.
@@ -256,8 +256,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2)\
: output, ASM_CALL_CONSTRAINT \
-   : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
- [new2] "i" (newfunc2), ## input)
+   : [old] "X" (oldfunc), [new1] "X" (newfunc1), \
+ [new2] "X" (newfunc2), ## input)
 
 /*
  * use this macro(s) if you need more than one output parameter
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 03/12] x86: Add macro to get symbol address for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Add a new _ASM_MOVABS macro to fetch a symbol address. It will be used
to replace "_ASM_MOV $, %dst" code construct that are not
compatible with PIE.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/asm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 3ff577c0b102..3a686057e882 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -30,6 +30,7 @@
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
 
 #define _ASM_MOV   __ASM_SIZE(mov)
+#define _ASM_MOVABS__ASM_SEL(movl, movabsq)
 #define _ASM_INC   __ASM_SIZE(inc)
 #define _ASM_DEC   __ASM_SIZE(dec)
 #define _ASM_ADD   __ASM_SIZE(add)
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 10/12] x86/power/64: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
---
 arch/x86/power/hibernate_asm_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index 3008baa2fa95..9ed980efef72 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,7 @@
 #include 
 
 ENTRY(swsusp_arch_suspend)
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -115,7 +115,7 @@ ENTRY(restore_registers)
movq%rax, %cr4;  # turn PGE back on
 
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqpt_regs_sp(%rax), %rsp
movqpt_regs_bp(%rax), %rbp
movqpt_regs_si(%rax), %rsi
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 09/12] x86/boot/64: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Early in boot, the kernel is mapped at a temporary address while preparing
the page table. To know the changes needed for the page table with KASLR,
the boot code calculates the difference between the expected address of the
kernel and the one chosen by KASLR. This does not work with PIE because all
symbol references in the code are relative: instead of the future relocated
virtual address, they resolve to the current temporary mapping.
Instructions were changed to use absolute 64-bit references.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.
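
A rough sketch of the difference, not code from this patch (_text is used
only as a familiar example symbol): at early boot the two forms below would
return different values.

extern char _text[];

static inline unsigned long text_current_mapping(void)
{
        unsigned long addr;

        /* RIP-relative: the address where the code is mapped right now. */
        asm("leaq _text(%%rip), %0" : "=r" (addr));
        return addr;
}

static inline unsigned long text_expected_address(void)
{
        unsigned long addr;

        /*
         * Absolute 64-bit: the relocated virtual address that the early
         * page-table math expects.
         */
        asm("movabsq $_text, %0" : "=r" (addr));
        return addr;
}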

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/head_64.S | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index bcd206c8ac90..64a4f0a22b20 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -90,8 +90,10 @@ startup_64:
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(early_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
jmp 1f
+
 ENTRY(secondary_startup_64)
UNWIND_HINT_EMPTY
/*
@@ -120,7 +122,8 @@ ENTRY(secondary_startup_64)
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(init_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(init_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
 1:
 
/* Enable PAE mode, PGE and LA57 */
@@ -138,7 +141,7 @@ ENTRY(secondary_startup_64)
movq%rax, %cr3
 
/* Ensure I am executing from virtual addresses */
-   movq$1f, %rax
+   movabs  $1f, %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
 1:
@@ -235,11 +238,12 @@ ENTRY(secondary_startup_64)
 *  REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 *  address given in m16:64.
 */
-   pushq   $.Lafter_lret   # put return address on stack for unwinder
+   movabs  $.Lafter_lret, %rax
+   pushq   %rax# put return address on stack for unwinder
xorl%ebp, %ebp  # clear frame pointer
-   movqinitial_code(%rip), %rax
+   leaqinitial_code(%rip), %rax
pushq   $__KERNEL_CS# set correct cs
-   pushq   %rax# target address in negative space
+   pushq   (%rax)  # target address in negative space
lretq
 .Lafter_lret:
 END(secondary_startup_64)
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 02/12] x86: Use symbol name in jump table for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Replace the %c constraint with %P. The %c is incompatible with PIE
because it implies an immediate value whereas %P references a symbol.
Change the _ASM_PTR reference to .long for the expected relocation size and
add a long padding to ensure entry alignment.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/jump_label.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/jump_label.h 
b/arch/x86/include/asm/jump_label.h
index 65191ce8e1cf..e47fad8ee632 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -25,9 +25,9 @@ static __always_inline bool arch_static_branch(struct 
static_key *key, bool bran
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
-   _ASM_PTR "%c0 + %c1 - .\n\t"
+   _ASM_PTR "%P0 - .\n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : :  "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
@@ -42,9 +42,9 @@ static __always_inline bool arch_static_branch_jump(struct 
static_key *key, bool
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
-   _ASM_PTR "%c0 + %c1 - .\n\t"
+   _ASM_PTR "%P0 - .\n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : : "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 08/12] x86/acpi: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ---
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 510fa12aab73..e080e943e295 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -14,7 +14,7 @@
 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 */
 ENTRY(wakeup_long64)
-   movqsaved_magic, %rax
+   movqsaved_magic(%rip), %rax
movq$0x123456789abcdef0, %rdx
cmpq%rdx, %rax
jne bogus_64_magic
@@ -25,14 +25,14 @@ ENTRY(wakeup_long64)
movw%ax, %es
movw%ax, %fs
movw%ax, %gs
-   movqsaved_rsp, %rsp
+   movqsaved_rsp(%rip), %rsp
 
-   movqsaved_rbx, %rbx
-   movqsaved_rdi, %rdi
-   movqsaved_rsi, %rsi
-   movqsaved_rbp, %rbp
+   movqsaved_rbx(%rip), %rbx
+   movqsaved_rdi(%rip), %rdi
+   movqsaved_rsi(%rip), %rsi
+   movqsaved_rbp(%rip), %rbp
 
-   movqsaved_rip, %rax
+   movqsaved_rip(%rip), %rax
jmp *%rax
 ENDPROC(wakeup_long64)
 
@@ -45,7 +45,7 @@ ENTRY(do_suspend_lowlevel)
xorl%eax, %eax
callsave_processor_state
 
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -64,13 +64,14 @@ ENTRY(do_suspend_lowlevel)
pushfq
popqpt_regs_flags(%rax)
 
-   movq$.Lresume_point, saved_rip(%rip)
+   leaq.Lresume_point(%rip), %rax
+   movq%rax, saved_rip(%rip)
 
-   movq%rsp, saved_rsp
-   movq%rbp, saved_rbp
-   movq%rbx, saved_rbx
-   movq%rdi, saved_rdi
-   movq%rsi, saved_rsi
+   movq%rsp, saved_rsp(%rip)
+   movq%rbp, saved_rbp(%rip)
+   movq%rbx, saved_rbx(%rip)
+   movq%rdi, saved_rdi(%rip)
+   movq%rsi, saved_rsi(%rip)
 
addq$8, %rsp
movl$3, %edi
@@ -82,7 +83,7 @@ ENTRY(do_suspend_lowlevel)
.align 4
 .Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqsaved_context_cr4(%rax), %rbx
movq%rbx, %cr4
movqsaved_context_cr3(%rax), %rbx
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 06/12] x86: pm-trace - Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change assembly to use the new _ASM_MOVABS macro instead of _ASM_MOV for
the assembly to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/pm-trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pm-trace.h b/arch/x86/include/asm/pm-trace.h
index bfa32aa428e5..972070806ce9 100644
--- a/arch/x86/include/asm/pm-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -8,7 +8,7 @@
 do {   \
if (pm_trace_enabled) { \
const void *tracedata;  \
-   asm volatile(_ASM_MOV " $1f,%0\n"   \
+   asm volatile(_ASM_MOVABS " $1f,%0\n"\
 ".section .tracedata,\"a\"\n"  \
 "1:\t.word %c1\n\t"\
 _ASM_PTR " %c2\n"  \
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 00/12] x86: PIE support to extend KASLR randomization

2019-05-20 Thread Thomas Garnier
Splitting the previous series in two. This part contains assembly code
changes required for PIE but without any direct dependencies with the
rest of the patchset.

Changes:
 - patch v7 (assembly):
   - Split patchset and reorder changes.
 - patch v6:
   - Rebase on latest changes in jump tables and crypto.
   - Fix wording on couple commits.
   - Revisit checkpatch warnings.
   - Moving to @chromium.org.
 - patch v5:
   - Adapt new crypto modules for PIE.
   - Improve per-cpu commit message.
   - Fix xen 32-bit build error with .quad.
   - Remove extra code for ftrace.
 - patch v4:
   - Simplify early boot by removing global variables.
   - Modify the mcount location script for __mcount_loc instead of the address
 read in the ftrace implementation.
   - Edit commit description to explain better where the kernel can be located.
   - Streamlined the testing done on each patch proposal. Always testing
 hibernation, suspend, ftrace and kprobe to ensure no regressions.
 - patch v3:
   - Update on message to describe longer term PIE goal.
   - Minor change on ftrace if condition.
   - Changed code using xchgq.
 - patch v2:
   - Adapt patch to work post KPTI and compiler changes
   - Redo all performance testing with latest configs and compilers
   - Simplify mov macro on PIE (MOVABS now)
   - Reduce GOT footprint
 - patch v1:
   - Simplify ftrace implementation.
   - Use gcc mstack-protector-guard-reg=%gs with PIE when possible.
 - rfc v3:
   - Use --emit-relocs instead of -pie to reduce dynamic relocation space on
 mapped memory. It also simplifies the relocation process.
   - Move the start of the module section next to the kernel. Remove the need for
 -mcmodel=large on modules. Extends module space from 1 to 2G maximum.
   - Support for XEN PVH as 32-bit relocations can be ignored with
 --emit-relocs.
   - Support for GOT relocations previously done automatically with -pie.
   - Remove need for dynamic PLT in modules.
   - Support dynamic GOT for modules.
 - rfc v2:
   - Add support for global stack cookie while the compiler defaults to fs without
 mcmodel=kernel
   - Change patch 7 to correctly jump out of the identity mapping on kexec load
 preserve.

These patches make some of the changes necessary to build the kernel as
Position Independent Executable (PIE) on x86_64. Another patchset will
add the PIE option and larger architecture changes.

The patches:
 - 1-2, 4-12: Change in assembly code to be PIE compliant.
 - 3: Add a new _ASM_MOVABS macro to fetch a symbol address generically.

diffstat:
 crypto/aegis128-aesni-asm.S |6 +-
 crypto/aegis128l-aesni-asm.S|8 +--
 crypto/aegis256-aesni-asm.S |6 +-
 crypto/aes-x86_64-asm_64.S  |   45 ++--
 crypto/aesni-intel_asm.S|8 +--
 crypto/camellia-aesni-avx-asm_64.S  |   42 +++
 crypto/camellia-aesni-avx2-asm_64.S |   44 
 crypto/camellia-x86_64-asm_64.S |8 +--
 crypto/cast5-avx-x86_64-asm_64.S|   50 ++
 crypto/cast6-avx-x86_64-asm_64.S|   44 +---
 crypto/des3_ede-asm_64.S|   96 
 crypto/ghash-clmulni-intel_asm.S|4 -
 crypto/glue_helper-asm-avx.S|4 -
 crypto/glue_helper-asm-avx2.S   |6 +-
 crypto/morus1280-avx2-asm.S |4 -
 crypto/morus1280-sse2-asm.S |8 +--
 crypto/morus640-sse2-asm.S  |6 +-
 crypto/sha256-avx2-asm.S|   23 +---
 entry/entry_64.S|   16 --
 include/asm/alternative.h   |6 +-
 include/asm/asm.h   |1 
 include/asm/jump_label.h|8 +--
 include/asm/paravirt_types.h|   12 +++-
 include/asm/pm-trace.h  |2 
 include/asm/processor.h |6 +-
 kernel/acpi/wakeup_64.S |   31 ++-
 kernel/head_64.S|   16 +++---
 kernel/relocate_kernel_64.S |2 
 power/hibernate_asm_64.S|4 -
 29 files changed, 299 insertions(+), 217 deletions(-)

Patchset is based on next-20190515.




[PATCH v7 05/12] x86/entry/64: Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/entry/entry_64.S | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 20e45d9b4e15..e99b3438aa9b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1268,7 +1268,8 @@ ENTRY(error_entry)
movl%ecx, %eax  /* zero extend */
cmpq%rax, RIP+8(%rsp)
je  .Lbstep_iret
-   cmpq$.Lgs_change, RIP+8(%rsp)
+   leaq.Lgs_change(%rip), %rcx
+   cmpq%rcx, RIP+8(%rsp)
jne .Lerror_entry_done
 
/*
@@ -1465,10 +1466,10 @@ ENTRY(nmi)
 * resume the outer NMI.
 */
 
-   movq$repeat_nmi, %rdx
+   leaqrepeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  1f
-   movq$end_repeat_nmi, %rdx
+   leaqend_repeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  nested_nmi_out
 1:
@@ -1522,7 +1523,8 @@ nested_nmi:
pushq   %rdx
pushfq
pushq   $__KERNEL_CS
-   pushq   $repeat_nmi
+   leaqrepeat_nmi(%rip), %rdx
+   pushq   %rdx
 
/* Put stack back */
addq$(6*8), %rsp
@@ -1561,7 +1563,11 @@ first_nmi:
addq$8, (%rsp)  /* Fix up RSP */
pushfq  /* RFLAGS */
pushq   $__KERNEL_CS/* CS */
-   pushq   $1f /* RIP */
+   pushq   $0  /* Future return address */
+   pushq   %rax/* Save RAX */
+   leaq1f(%rip), %rax  /* RIP */
+   movq%rax, 8(%rsp)   /* Put 1f on return address */
+   popq%rax/* Restore RAX */
iretq   /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
 1:
-- 
2.21.0.1020.gf2820cf01a-goog



[PATCH v7 04/12] x86: relocate_kernel - Adapt assembly for PIE support

2019-05-20 Thread Thomas Garnier
From: Thomas Garnier 

Change the assembly code to use only absolute references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/relocate_kernel_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S 
b/arch/x86/kernel/relocate_kernel_64.S
index 11eda21eb697..3320368b6ec9 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -208,7 +208,7 @@ identity_mapped:
movq%rax, %cr3
lea PAGE_SIZE(%r8), %rsp
callswap_pages
-   movq$virtual_mapped, %rax
+   movabsq $virtual_mapped, %rax
pushq   %rax
ret
 
-- 
2.21.0.1020.gf2820cf01a-goog



Re: [PATCH v6 14/27] x86/percpu: Adapt percpu for PIE support

2019-04-08 Thread Thomas Garnier
On Fri, Feb 1, 2019 at 9:13 AM Thomas Garnier  wrote:
>
> On Thu, Jan 31, 2019 at 6:31 PM Christopher Lameter  wrote:
> >
> > On Thu, 31 Jan 2019, Thomas Garnier wrote:
> >
> > > The per-cpu symbols are in a section that is zero based to create
> > > offsets. The compiler doesn't see them as offsets but as relative
> > > symbol and try to relocate them. Given the distance between zero and
> > > the mapped kernel is much larger than the instruction offset range, it
> > > fails to do it.
> >
> > We switch that off in the linker. If that does not work with your
> > modifications then you need to figure out how to update the link
> > configuration.
> >
>
> It didn't work originally but I will revisit to see if I missed something.

I revisited and couldn't find a way to prevent relocations to the
percpu section. Without PIE, you can reference absolute addresses, which
was convenient for percpu.

Christopher: Did you have something specific in mind?

I checked the following:
 - Changing the FLAGS() on the PHDRS.
 - using -z noreloc-overflow which actually doesn't seem to apply to
PC32 relocations.
 - Look at all linker options and script format for anything around that.


Re: [PATCH v6 02/27] x86: Use symbol name in jump table for PIE support

2019-02-07 Thread Thomas Garnier
On Thu, Feb 7, 2019 at 9:11 AM Borislav Petkov  wrote:
>
> On Thu, Feb 07, 2019 at 09:04:45AM -0800, Thomas Garnier wrote:
> > I assume that's an optimisation done by gcc later.
>
> So why is that change even needed? Where does it break?
>
> > The P modifier in the documentation does state that it is used to
> > generate PIC code.
>
> The documentation says:
>
> "If used for a function, print the PLT suffix and generate PIC code. For
> example, emit foo@PLT instead of ’foo’ for the function foo()."
>
> when you use %P for a function. Which is not how it is used here.

I did more checks about that. I think Ard's patch to make jump label
relative actually fixed the issue I had with them.

Thanks for spotting this, I will do additional checks and look at
removing this change.

>
> --
> Regards/Gruss,
> Boris.
>
> Good mailing practices for 400: avoid top-posting and trim the reply.


Re: [PATCH v6 02/27] x86: Use symbol name in jump table for PIE support

2019-02-07 Thread Thomas Garnier
On Thu, Feb 7, 2019 at 4:17 AM Borislav Petkov  wrote:
>
> On Thu, Jan 31, 2019 at 11:24:09AM -0800, Thomas Garnier wrote:
> > Replace the %c constraint with %P. The %c is incompatible with PIE
> > because it implies an immediate value whereas %P reference a symbol.
>
> How so?
>
> AFAIK, %c requires a constant operand and if %P is used to print a
> constant, it simply drops syntax-specific prefixes and does a bare
> constant.
>
> I guess that here
>
> https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#x86Operandmodifiers
>
> is not entirely correct as it should not say "If used for a constant"
> for %P but say "symbol or constant".
>
> But before/after asm doesn't show any difference. So what gives?

I assume that's an optimisation done by gcc later. The P modifier in
the documentation does state that it is used to generate PIC code.

>
> before:
> # 39 "./arch/x86/include/asm/jump_label.h" 1
> 1:
> .byte 0xe9
>  .long .L241 - 2f   #
> 2:
> .pushsection __jump_table,  "aw"
>  .balign 8
> .long 1b - ., .L241 - . #
>  .quad __use_tsc + 1 - .#,
> .popsection
>
> after:
> # 39 "./arch/x86/include/asm/jump_label.h" 1
> 1:
> .byte 0xe9
>  .long .L241 - 2f   #
> 2:
> .pushsection __jump_table,  "aw"
>  .balign 8
> .long 1b - ., .L241 - . #
>  .quad __use_tsc+1 - .  #
> .popsection
>
> --
> Regards/Gruss,
> Boris.
>
> Good mailing practices for 400: avoid top-posting and trim the reply.


Re: [PATCH v6 01/27] x86/crypto: Adapt assembly for PIE support

2019-02-07 Thread Thomas Garnier
On Thu, Feb 7, 2019 at 3:49 AM Borislav Petkov  wrote:
>
> On Thu, Jan 31, 2019 at 11:24:08AM -0800, Thomas Garnier wrote:
> > Change the assembly code to use only relative references of symbols for the
> > kernel to be PIE compatible.
> >
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range below 0x8000.
>
> This sentence is auto-sprinkled in a bunch of commit messages. Sounds to
> me it should be rather somewhere in the 0/n message as a justification
> for the feature but not in every other commit message...?

I go into more details in the compose message but I wanted a small
sentence explaining why PIE changes are being made.

>
> --
> Regards/Gruss,
> Boris.
>
> Good mailing practices for 400: avoid top-posting and trim the reply.


Re: [PATCH v6 19/27] kvm: Adapt assembly for PIE support

2019-02-06 Thread Thomas Garnier
On Wed, Feb 6, 2019 at 11:56 AM Sean Christopherson
 wrote:
>
> On Thu, Jan 31, 2019 at 11:24:26AM -0800, Thomas Garnier wrote:
> > Change the assembly code to use only relative references of symbols for the
> > kernel to be PIE compatible. The new __ASM_MOVABS macro is used to
> > get the address of a symbol on both 32 and 64-bit with PIE support.
> >
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range below 0xffff8000.
> >
> > Signed-off-by: Thomas Garnier 
> > ---
> >  arch/x86/include/asm/kvm_host.h | 8 ++--
> >  arch/x86/kernel/kvm.c   | 6 --
> >  arch/x86/kvm/svm.c  | 4 ++--
> >  arch/x86/kvm/vmx/vmx.c  | 2 +-
> >  4 files changed, 13 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h 
> > b/arch/x86/include/asm/kvm_host.h
> > index 4660ce90de7f..fdb3307d5fe1 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1498,9 +1498,13 @@ asmlinkage void kvm_spurious_fault(void);
> >   ".pushsection .fixup, \"ax\" \n" \
> >   "667: \n\t" \
> >   cleanup_insn "\n\t"   \
> > - "cmpb $0, kvm_rebooting \n\t" \
> > + "cmpb $0, kvm_rebooting" __ASM_SEL(, (%%rip)) " \n\t" \
> >   "jne 668b \n\t"   \
> > - __ASM_SIZE(push) " $666b \n\t"\
> > + __ASM_SIZE(push) "$0 \n\t"  \
> > + __ASM_SIZE(push) "%%" _ASM_AX " \n\t"   \
> > + _ASM_MOVABS " $666b, %%" _ASM_AX "\n\t" \
> > + _ASM_MOV " %%" _ASM_AX ", " __ASM_SEL(4, 8) "(%%" _ASM_SP ") \n\t" \
> > + __ASM_SIZE(pop) "%%" _ASM_AX " \n\t"\
>
> This blob isn't very intuitive to begin with, and the extra stack
> shenanigans are a bit much when PIE is disabled.  What about breaking
> out the behavior to separate helper macros to keep the simpler code
> for non-PIE and to make the code somewhat self-documenting?  E.g.:
>
> #ifndef CONFIG_X86_PIE
> #define KVM_PUSH_FAULTING_INSN_RIP   __ASM_SIZE(push) " $666b \n\t"
> #else
> #define KVM_PUSH_FAULTING_INSN_RIP \
> __ASM_SIZE(push) "$0 \n\t" \
> __ASM_SIZE(push) "%%" _ASM_AX " \n\t"  \
> _ASM_MOVABS " $666b, %%" _ASM_AX "\n\t"\
> _ASM_MOV " %%" _ASM_AX ", " __ASM_SEL(4, 8) "(%%" _ASM_SP ") \n\t" \
> __ASM_SIZE(pop) "%%" _ASM_AX " \n\t"
> #endif
>
> #define kvm_handle_fault_on_reboot(insn, cleanup_insn)  \
> "666: " insn "\n\t" \
> "668: \n\t"   \
> ".pushsection .fixup, \"ax\" \n" \
> "667: \n\t" \
> cleanup_insn "\n\t"   \
> "cmpb $0, kvm_rebooting" __ASM_SEL(, (%%rip)) " \n\t" \
> "jne 668b \n\t"   \
> KVM_PUSH_FAULTING_INSN_RIP  \
> "jmp kvm_spurious_fault \n\t" \
> ".popsection \n\t" \
> _ASM_EXTABLE(666b, 667b)
>
> >   "jmp kvm_spurious_fault \n\t" \
> >   ".popsection \n\t" \
> >   _ASM_EXTABLE(666b, 667b)
> > diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> > index 5c93a65ee1e5..f6eb02004e43 100644
> > --- a/arch/x86/kernel/kvm.c
> > +++ b/arch/x86/kernel/kvm.c
>
> This change to arch/x86/kernel/kvm.c should be done in a separate patch
> as it affects the kernel itself when running as a guest under KVM,
> whereas arch/x86/kvm/**/* and arch/x86/include/asm/kvm_host.h affect
> KVM as a host, i.e. the KVM module.  Case in point, the below bug causes
> a kernel panic when running as a KVM guest but has no impact on the KVM
> module.

Got it, will split in next iteration.

>
> > @@ -826,8 +826,10 @@ asm(
> >  ".global __raw_callee_save___kvm_vcpu_is_preempted;"
> >  ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
> >  "__raw_callee_save___kvm_vcpu_is_preempted:"
> > -"movq__per_cpu_offset(,%rdi,8), %rax;"
> > -"cmpb$0, "

Re: [PATCH v6 20/27] x86: Support global stack cookie

2019-02-01 Thread Thomas Garnier
On Fri, Feb 1, 2019 at 2:36 PM Andy Lutomirski  wrote:
>
>
> > On Feb 1, 2019, at 12:21 PM, Thomas Garnier  wrote:
> >
> >> On Fri, Feb 1, 2019 at 11:27 AM Andy Lutomirski  wrote:
> >>
> >>> On Thu, Jan 31, 2019 at 11:29 AM Thomas Garnier  
> >>> wrote:
> >>>
> >>> Add an off-by-default configuration option to use a global stack cookie
> >>> instead of the default TLS. This configuration option will only be used
> >>> with PIE binaries.
> >>>
> >>> For kernel stack cookie, the compiler uses the mcmodel=kernel to switch
> >>> between the fs segment to gs segment. A PIE binary does not use
> >>> mcmodel=kernel because it can be relocated anywhere, therefore the
> >>> compiler will default to the fs segment register. This is fixed on the
> >>> latest version of gcc.
> >>
> >> I hate all these gcc-sucks-so-we-hack-it-and-change-nasty-semantics
> >> options.  How about just preventing use of both stack protector and
> >> PIE unless the version of gcc in use is new enough.
> >
> > So fail the build in this scenario?
>
> Fail the build or use some Kconfig magic to prevent this from being 
> configured in the first place.

Ok, I can do that in next iteration.

>
> >
> >>
> >> Also, does -mstack-protector-guard-reg not solve this?  See
> >> https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708.  Or is there
> >> another bug?  Or are you worried about gcc versions that don't have
> >> that feature yet?
> >
> > I am worried about gcc versions that don't have this feature, yes.


Re: [PATCH v6 20/27] x86: Support global stack cookie

2019-02-01 Thread Thomas Garnier
On Fri, Feb 1, 2019 at 11:27 AM Andy Lutomirski  wrote:
>
> On Thu, Jan 31, 2019 at 11:29 AM Thomas Garnier  wrote:
> >
> > Add an off-by-default configuration option to use a global stack cookie
> > instead of the default TLS. This configuration option will only be used
> > with PIE binaries.
> >
> > For kernel stack cookie, the compiler uses the mcmodel=kernel to switch
> > between the fs segment to gs segment. A PIE binary does not use
> > mcmodel=kernel because it can be relocated anywhere, therefore the
> > compiler will default to the fs segment register. This is fixed on the
> > latest version of gcc.
>
> I hate all these gcc-sucks-so-we-hack-it-and-change-nasty-semantics
> options.  How about just preventing use of both stack protector and
> PIE unless the version of gcc in use is new enough.

So fail the build in this scenario?

>
> Also, does -mstack-protector-guard-reg not solve this?  See
> https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708.  Or is there
> another bug?  Or are you worried about gcc versions that don't have
> that feature yet?

I am worried about gcc versions that don't have this feature, yes.


Re: [PATCH v6 15/27] compiler: Option to default to hidden symbols

2019-02-01 Thread Thomas Garnier
On Fri, Feb 1, 2019 at 12:24 AM Adrian Hunter  wrote:
>
> On 31/01/19 9:24 PM, Thomas Garnier wrote:
> > Provide an option to default visibility to hidden except for key
> > symbols. This option is disabled by default and will be used by x86_64
> > PIE support to remove errors between compilation units.
> >
> > The default visibility is also enabled for external symbols that are
> > compared as they maybe equals (start/end of sections). In this case,
> > older versions of GCC will remove the comparison if the symbols are
> > hidden. This issue exists at least on gcc 4.9 and before.
>
> What does this mean, if anything, for what/how symbols appear in 
> /proc/kallsyms?

They will still appear in kallsyms. Some symbols are no longer absolute
or have moved sections, but they are still present.

-- 
Thomas


Re: [PATCH v6 14/27] x86/percpu: Adapt percpu for PIE support

2019-02-01 Thread Thomas Garnier
On Thu, Jan 31, 2019 at 6:31 PM Christopher Lameter  wrote:
>
> On Thu, 31 Jan 2019, Thomas Garnier wrote:
>
> > The per-cpu symbols are in a section that is zero based to create
> > offsets. The compiler doesn't see them as offsets but as relative
> > symbol and try to relocate them. Given the distance between zero and
> > the mapped kernel is much larger than the instruction offset range, it
> > fails to do it.
>
> We switch that off in the linker. If that does not work with your
> modifications then you need to figure out how to update the link
> configuration.
>

It didn't work originally but I will revisit to see if I missed something.


Re: [PATCH v6 13/27] x86/boot/64: Build head64.c as mcmodel large when PIE is enabled

2019-02-01 Thread Thomas Garnier
On Fri, Feb 1, 2019 at 3:15 AM Kirill A. Shutemov  wrote:
>
> On Thu, Jan 31, 2019 at 11:24:20AM -0800, Thomas Garnier wrote:
> > The __startup_64 function assumes all symbols have relocated addresses
> > instead of the current boot virtual address. PIE generated code favor
> > relative addresses making all virtual and physical address math incorrect.
> > If PIE is enabled, build head64.c as mcmodel large instead to ensure 
> > absolute
> > references on all memory access. Add a global __force_order variable 
> > required
> > when using a large model with read_cr* functions.
> >
> > To build head64.c as mcmodel=large, disable the retpoline gcc flags.
> > This code is used at early boot and removed later, it doesn't need
> > retpoline mitigation.
> >
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range below 0x8000.
> >
> > Signed-off-by: Thomas Garnier 
> > ---
> >  arch/x86/kernel/Makefile | 6 ++
> >  arch/x86/kernel/head64.c | 3 +++
> >  2 files changed, 9 insertions(+)
> >
> > diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
> > index 00b7e27bc2b7..1f98f52eab9f 100644
> > --- a/arch/x86/kernel/Makefile
> > +++ b/arch/x86/kernel/Makefile
> > @@ -22,6 +22,12 @@ CFLAGS_REMOVE_early_printk.o = -pg
> >  CFLAGS_REMOVE_head64.o = -pg
> >  endif
> >
> > +ifdef CONFIG_X86_PIE
> > +# Remove PIE and retpoline flags that are incompatible with mcmodel=large
> > +CFLAGS_REMOVE_head64.o += -fPIE -mindirect-branch=thunk-extern 
> > -mindirect-branch-register
> > +CFLAGS_head64.o = -mcmodel=large
> > +endif
> > +
> >  KASAN_SANITIZE_head$(BITS).o := n
> >  KASAN_SANITIZE_dumpstack.o   := n
> >  KASAN_SANITIZE_dumpstack_$(BITS).o   := n
> > diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> > index 16b1cbd3a61e..22e81275495b 100644
> > --- a/arch/x86/kernel/head64.c
> > +++ b/arch/x86/kernel/head64.c
> > @@ -63,6 +63,9 @@ EXPORT_SYMBOL(vmemmap_base);
> >
> >  #define __head   __section(.head.text)
> >
> > +/* Required for read_cr3 when building as PIE */
> > +unsigned long __force_order;
> > +
>
> I believe it only needed for GCC < 5. Newer GCC can eliminate the
> reference. See my comment in arch/x86/boot/compressed/pgtable_64.c.
>
> Maybe we should expand the comment here too?

Makes sense, I will add a similar comment in the next iteration. Thanks
for pointing that out.

>
> --
>  Kirill A. Shutemov


Re: [PATCH v6 15/27] compiler: Option to default to hidden symbols

2019-02-01 Thread Thomas Garnier
On Thu, Jan 31, 2019 at 11:13 PM Dan Carpenter  wrote:
>
> On Thu, Jan 31, 2019 at 11:24:22AM -0800, Thomas Garnier wrote:
> > Provide an option to default visibility to hidden except for key
> > symbols.
>
> It took me a while to figure out the verb in this sentence...  :P

I agree, I should rewrite this description.

>
> It's weird that we're annotating things to be default.  What visibility
> are they we leave off the annotation?

The word "default" is confusing but that's how it is called by gcc.
Symbols not marked default are hidden.

>
> regards,
> dan carpenter
>


-- 
Thomas


Re: [PATCH v6 14/27] x86/percpu: Adapt percpu for PIE support

2019-01-31 Thread Thomas Garnier
On Thu, Jan 31, 2019 at 12:57 PM Christopher Lameter  wrote:
>
> On Thu, 31 Jan 2019, Thomas Garnier wrote:
>
> > Perpcu uses a clever design where the .percu ELF section has a virtual
> > address of zero and the custom linux relocation code avoid relocating
> > specific symbols. It makes the code simple and easily adaptable with or
> > without SMP support.
>
> We usually talk about this as offsets rather than addressess. The intend
> here is to give every processor its own address that is unique for this
> processor. Operations are always relative to a segment register and the
> whole area can be relocated at will by simply changing the segment
> register.
>
> > This design is incompatible with PIE. While creating a PIE binary, the
> > copmiler tries to make everything relative. The compiler will attempt to
>
> This is very compatible with PIE because it is already relative.

The per-cpu symbols are in a section that is zero based to create
offsets. The compiler doesn't see them as offsets but as relative
symbols and tries to relocate them. Given the distance between zero and
the mapped kernel is much larger than the instruction offset range, it
fails to do it.

>
> > generate instructions with the distance between zero and any 64-bit
> > virtual address. It will fail as the relocation range cannot fit within
> > the possible instructions accessing a segment register.
>
> Leave the offsets alone and just change the segment register if you need
> to relocate the area of a specific processor?
>
> > The assembly and PER_CPU macros are changed to use relative references
> > when PIE is enabled.
>
> They already use relative reference. What is the point here?
>
> > --- a/arch/x86/include/asm/percpu.h
> > +++ b/arch/x86/include/asm/percpu.h
> > @@ -5,9 +5,11 @@
> >  #ifdef CONFIG_X86_64
> >  #define __percpu_seg gs
> >  #define __percpu_mov_op  movq
> > +#define __percpu_rel (%rip)
>
> The percpu section cannot be IP relative since we need to have separate
> address spaces per cpu.
>


[PATCH v6 02/27] x86: Use symbol name in jump table for PIE support

2019-01-31 Thread Thomas Garnier
Replace the %c constraint with %P. The %c is incompatible with PIE
because it implies an immediate value whereas %P references a symbol.
Change the _ASM_PTR reference to .long for the expected relocation size and
add a long padding to ensure entry alignment.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/jump_label.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/jump_label.h 
b/arch/x86/include/asm/jump_label.h
index 65191ce8e1cf..e47fad8ee632 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -25,9 +25,9 @@ static __always_inline bool arch_static_branch(struct 
static_key *key, bool bran
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
-   _ASM_PTR "%c0 + %c1 - .\n\t"
+   _ASM_PTR "%P0 - .\n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : :  "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
@@ -42,9 +42,9 @@ static __always_inline bool arch_static_branch_jump(struct 
static_key *key, bool
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
-   _ASM_PTR "%c0 + %c1 - .\n\t"
+   _ASM_PTR "%P0 - .\n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : : "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 15/27] compiler: Option to default to hidden symbols

2019-01-31 Thread Thomas Garnier
Provide an option to set the default symbol visibility to hidden, except
for key symbols. This option is disabled by default and will be used by
x86_64 PIE support to remove errors between compilation units.

Default visibility is also kept for external symbols that are compared
against each other, as they may be equal (start/end of sections). In this
case, older versions of GCC will remove the comparison if the symbols are
hidden. This issue exists at least on gcc 4.9 and earlier.
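
A minimal sketch of the mechanism (the kernel wires this up through
compiler.h, __default_visibility and CONFIG_DEFAULT_HIDDEN_SYMS; the
variable below is hypothetical):

#pragma GCC visibility push(hidden)

int internal_state;     /* hidden by default: no GOT indirection needed */

/*
 * Symbols compared across objects (e.g. section boundaries) keep default
 * visibility so older GCCs do not optimize the comparison away.
 */
extern char _text[] __attribute__((visibility("default")));

#pragma GCC visibility pop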

Signed-off-by: Thomas Garnier 
---
 arch/x86/boot/boot.h |  2 +-
 arch/x86/include/asm/setup.h |  2 +-
 arch/x86/kernel/cpu/microcode/core.c |  4 ++--
 drivers/base/firmware_loader/main.c  |  4 ++--
 include/asm-generic/sections.h   |  6 ++
 include/linux/compiler.h |  7 +++
 init/Kconfig |  7 +++
 kernel/kallsyms.c| 16 
 kernel/trace/trace.h |  4 ++--
 lib/dynamic_debug.c  |  4 ++--
 10 files changed, 38 insertions(+), 18 deletions(-)

diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 32a09eb5c101..c4afcfecc817 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -193,7 +193,7 @@ static inline bool memcmp_gs(const void *s1, addr_t s2, 
size_t len)
 }
 
 /* Heap -- available for dynamic lists. */
-extern char _end[];
+extern char _end[] __default_visibility;
 extern char *HEAP;
 extern char *heap_end;
 #define RESET_HEAP() ((void *)( HEAP = _end ))
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ed8ec011a9fd..74f0a8d87986 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -71,7 +71,7 @@ static inline void x86_ce4100_early_setup(void) { }
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
-extern char _text[];
+extern char _text[] __default_visibility;
 
 static inline bool kaslr_enabled(void)
 {
diff --git a/arch/x86/kernel/cpu/microcode/core.c 
b/arch/x86/kernel/cpu/microcode/core.c
index 97f9ada9ceda..04ca89e65f79 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -149,8 +149,8 @@ static bool __init check_loader_disabled_bsp(void)
return *res;
 }
 
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
+extern struct builtin_fw __start_builtin_fw[] __default_visibility;
+extern struct builtin_fw __end_builtin_fw[] __default_visibility;
 
 bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 {
diff --git a/drivers/base/firmware_loader/main.c 
b/drivers/base/firmware_loader/main.c
index 8e9213b36e31..f04096161a52 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -94,8 +94,8 @@ static struct firmware_cache fw_cache;
 
 #ifdef CONFIG_FW_LOADER
 
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
+extern struct builtin_fw __start_builtin_fw[] __default_visibility;
+extern struct builtin_fw __end_builtin_fw[] __default_visibility;
 
 static void fw_copy_to_prealloc_buf(struct firmware *fw,
void *buf, size_t size)
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d79abca81a52..94f072f3a48d 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -32,6 +32,9 @@
  * __softirqentry_text_start, __softirqentry_text_end
  * __start_opd, __end_opd
  */
+#ifdef CONFIG_DEFAULT_HIDDEN_SYMS
+#pragma GCC visibility push(default)
+#endif
 extern char _text[], _stext[], _etext[];
 extern char _data[], _sdata[], _edata[];
 extern char __bss_start[], __bss_stop[];
@@ -49,6 +52,9 @@ extern char __start_once[], __end_once[];
 
 /* Start and end of .ctors section - used for constructor calls. */
 extern char __ctors_start[], __ctors_end[];
+#ifdef CONFIG_DEFAULT_HIDDEN_SYMS
+#pragma GCC visibility pop
+#endif
 
 /* Start and end of .opd section - used for function descriptors. */
 extern char __start_opd[], __end_opd[];
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 87692fdae97a..5c0723604a52 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -287,6 +287,13 @@ unsigned long read_word_at_a_time(const void *addr)
__u.__val;  \
 })
 
+#ifdef CONFIG_DEFAULT_HIDDEN_SYMS
+#pragma GCC visibility push(hidden)
+#define __default_visibility  __attribute__((visibility ("default")))
+#else
+#define __default_visibility
+#endif
+
 #endif /* __KERNEL__ */
 
 /*
diff --git a/init/Kconfig b/init/Kconfig
index bb383615823a..116e0de4817f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1848,6 +1848,13 @@ config CMDLINE_OVERRIDE
 
 endif
 
+#
+# Default to hidden visibility for all symbols.
+# Useful for Position Independent Code to reduce global references.
+#
+config DEFAULT_HIDDEN_SYMS
+   bool
+
 endmenu# Gen

[PATCH v6 17/27] x86/relocs: Handle PIE relocations

2019-01-31 Thread Thomas Garnier
Change the relocation tool to correctly handle relocations generated by
the -fPIE option:

 - Add a relocation for each entry of the .got section, given the linker
   does not generate R_X86_64_GLOB_DAT on a simple link.
 - Ignore R_X86_64_GOTPCREL.

Signed-off-by: Thomas Garnier 
---
 arch/x86/tools/relocs.c | 96 -
 1 file changed, 95 insertions(+), 1 deletion(-)

diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b629f6992d9f..2a3c703218cc 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -32,6 +32,7 @@ struct section {
Elf_Sym*symtab;
Elf_Rel*reltab;
char   *strtab;
+   Elf_Addr   *got;
 };
 static struct section *secs;
 
@@ -295,6 +296,36 @@ static Elf_Sym *sym_lookup(const char *symname)
return 0;
 }
 
+static Elf_Sym *sym_lookup_addr(Elf_Addr addr, const char **name)
+{
+   int i;
+
+   for (i = 0; i < ehdr.e_shnum; i++) {
+   struct section *sec = &secs[i];
+   long nsyms;
+   Elf_Sym *symtab;
+   Elf_Sym *sym;
+
+   if (sec->shdr.sh_type != SHT_SYMTAB)
+   continue;
+
+   nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
+   symtab = sec->symtab;
+
+   for (sym = symtab; --nsyms >= 0; sym++) {
+   if (sym->st_value == addr) {
+   if (name) {
+   *name = sym_name(sec->link->strtab,
+sym);
+   }
+   return sym;
+   }
+   }
+   }
+   return 0;
+}
+
+
 #if BYTE_ORDER == LITTLE_ENDIAN
 #define le16_to_cpu(val) (val)
 #define le32_to_cpu(val) (val)
@@ -515,6 +546,35 @@ static void read_relocs(FILE *fp)
}
 }
 
+static void read_got(FILE *fp)
+{
+   int i;
+
+   for (i = 0; i < ehdr.e_shnum; i++) {
+   struct section *sec = &secs[i];
+
+   sec->got = NULL;
+   if (sec->shdr.sh_type != SHT_PROGBITS ||
+   strcmp(sec_name(i), ".got")) {
+   continue;
+   }
+   sec->got = malloc(sec->shdr.sh_size);
+   if (!sec->got) {
+   die("malloc of %d bytes for got failed\n",
+   sec->shdr.sh_size);
+   }
+   if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
+   die("Seek to %d failed: %s\n",
+   sec->shdr.sh_offset, strerror(errno));
+   }
+   if (fread(sec->got, 1, sec->shdr.sh_size, fp)
+   != sec->shdr.sh_size) {
+   die("Cannot read got: %s\n",
+   strerror(errno));
+   }
+   }
+}
+
 
 static void print_absolute_symbols(void)
 {
@@ -645,6 +705,32 @@ static void add_reloc(struct relocs *r, uint32_t offset)
r->offset[r->count++] = offset;
 }
 
+/*
+ * The linker does not generate relocations for the GOT for the kernel.
+ * If a GOT is found, simulate the relocations that should have been included.
+ */
+static void walk_got_table(int (*process)(struct section *sec, Elf_Rel *rel,
+ Elf_Sym *sym, const char *symname),
+  struct section *sec)
+{
+   int i;
+   Elf_Addr entry;
+   Elf_Sym *sym;
+   const char *symname;
+   Elf_Rel rel;
+
+   for (i = 0; i < sec->shdr.sh_size/sizeof(Elf_Addr); i++) {
+   entry = sec->got[i];
+   sym = sym_lookup_addr(entry, &symname);
+   if (!sym)
+   die("Could not found got symbol for entry %d\n", i);
+   rel.r_offset = sec->shdr.sh_addr + i * sizeof(Elf_Addr);
+   rel.r_info = ELF_BITS == 64 ? R_X86_64_GLOB_DAT
+: R_386_GLOB_DAT;
+   process(sec, &rel, sym, symname);
+   }
+}
+
 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
 {
@@ -658,6 +744,8 @@ static void walk_relocs(int (*process)(struct section *sec, 
Elf_Rel *rel,
struct section *sec = &secs[i];
 
if (sec->shdr.sh_type != SHT_REL_TYPE) {
+   if (sec->got)
+   walk_got_table(process, sec);
continue;
}
sec_symtab  = sec->link;
@@ -767,6 +855,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, 
ElfW(Sym) *sym,
offset += per_cpu_load_addr;
 
switch (r_type) {
+   case R_X86_64_GOTPCREL:
case R_X86_64_NONE:

[PATCH v6 05/27] x86/entry/64: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/entry/entry_64.S | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1f0efdb7b629..16a93eb4c11f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1269,7 +1269,8 @@ ENTRY(error_entry)
movl%ecx, %eax  /* zero extend */
cmpq%rax, RIP+8(%rsp)
je  .Lbstep_iret
-   cmpq$.Lgs_change, RIP+8(%rsp)
+   leaq.Lgs_change(%rip), %rcx
+   cmpq%rcx, RIP+8(%rsp)
jne .Lerror_entry_done
 
/*
@@ -1466,10 +1467,10 @@ ENTRY(nmi)
 * resume the outer NMI.
 */
 
-   movq$repeat_nmi, %rdx
+   leaqrepeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  1f
-   movq$end_repeat_nmi, %rdx
+   leaqend_repeat_nmi(%rip), %rdx
cmpq8(%rsp), %rdx
ja  nested_nmi_out
 1:
@@ -1523,7 +1524,8 @@ nested_nmi:
pushq   %rdx
pushfq
pushq   $__KERNEL_CS
-   pushq   $repeat_nmi
+   leaqrepeat_nmi(%rip), %rdx
+   pushq   %rdx
 
/* Put stack back */
addq$(6*8), %rsp
@@ -1562,7 +1564,11 @@ first_nmi:
addq$8, (%rsp)  /* Fix up RSP */
pushfq  /* RFLAGS */
pushq   $__KERNEL_CS/* CS */
-   pushq   $1f /* RIP */
+   pushq   $0  /* Future return address */
+   pushq   %rax/* Save RAX */
+   leaq1f(%rip), %rax  /* RIP */
+   movq%rax, 8(%rsp)   /* Put 1f on return address */
+   popq%rax/* Restore RAX */
iretq   /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
 1:
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 11/27] x86/paravirt: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
If PIE is enabled, switch the paravirt assembly constraints to be
compatible. The %c/i constraints generate smaller code, so they are kept by
default.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.
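
A stand-alone sketch of the PIE constraint pair (my_op is a placeholder,
not a pv_ops member):

extern void (*my_op)(void);

static inline void call_my_op(void)
{
        /*
         * Illustrative only: the real PARAVIRT_CALL also deals with
         * clobbers, retpolines and runtime patching.  With "p"/%a the
         * operand is an address expression, so the compiler may emit a
         * RIP-relative reference; "i"/%c forces an absolute immediate,
         * which is smaller but not PIE-friendly.
         */
        asm volatile("call *%a[op];" : : [op] "p" (&my_op) : "memory");
}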

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/paravirt_types.h | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/paravirt_types.h 
b/arch/x86/include/asm/paravirt_types.h
index 488c59686a73..8cfcc1b463de 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -342,9 +342,17 @@ extern struct paravirt_patch_template pv_ops;
 #define PARAVIRT_PATCH(x)  \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
+#ifdef CONFIG_X86_PIE
+#define paravirt_opptr_call "a"
+#define paravirt_opptr_type "p"
+#else
+#define paravirt_opptr_call "c"
+#define paravirt_opptr_type "i"
+#endif
+
 #define paravirt_type(op)  \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),\
-   [paravirt_opptr] "i" (&(pv_ops.op))
+   [paravirt_opptr] paravirt_opptr_type (&(pv_ops.op))
 #define paravirt_clobber(clobber)  \
[paravirt_clobber] "i" (clobber)
 
@@ -392,7 +400,7 @@ int paravirt_disable_iospace(void);
  */
 #define PARAVIRT_CALL  \
ANNOTATE_RETPOLINE_SAFE \
-   "call *%c[paravirt_opptr];"
+   "call *%" paravirt_opptr_call "[paravirt_opptr];"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 25/27] x86/pie: Add option to build the kernel as PIE

2019-01-31 Thread Thomas Garnier
Add the CONFIG_X86_PIE option which builds the kernel as a Position
Independent Executable (PIE). The kernel is currently built with the
mcmodel=kernel option, which forces it to stay in the top 2G of the
virtual address space. With PIE, the kernel will be able to move below
the current limit.

The --emit-relocs linker option was kept instead of using -pie to limit
the impact on mapped sections. Any incompatible relocation will be
caught by the arch/x86/tools/relocs binary at compile time.

If segment-based stack cookies are enabled, try to use the compiler
option to select the segment register. If not available, automatically
enable the global stack cookie in auto mode. Otherwise, recommend a
compiler update or the global stack cookie option.

Performance/Size impact:

Size of vmlinux (Default configuration):
 File size:
 - PIE disabled: +0.18%
 - PIE enabled: -1.977% (less relocations)
 .text section:
 - PIE disabled: same
 - PIE enabled: same

Size of vmlinux (Ubuntu configuration):
 File size:
 - PIE disabled: +0.21%
 - PIE enabled: +10%
 .text section:
 - PIE disabled: same
 - PIE enabled: +0.001%

The size increase is mainly due to not having access to the 32-bit signed
relocation that can be used with mcmodel=kernel. A small part is due to reduced
optimization for PIE code. This bug [1] was opened with gcc to provide better
code generation for kernel PIE.

Hackbench (50% and 1600% on thread/process for pipe/sockets):
 - PIE disabled: no significant change (avg -/+ 0.5% on latest test).
 - PIE enabled: between -1% to +1% in average (default and Ubuntu config).

Kernbench (average of 10 Half and Optimal runs):
 Elapsed Time:
 - PIE disabled: no significant change (avg -0.5%)
 - PIE enabled: average -0.5% to +0.5%
 System Time:
 - PIE disabled: no significant change (avg -0.1%)
 - PIE enabled: average -0.4% to +0.4%.

[1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82303

Signed-off-by: Thomas Garnier 
---
 arch/x86/Kconfig  |  8 
 arch/x86/Makefile | 45 -
 2 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c3ad1b0ae1a1..e4316b8ed130 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2237,6 +2237,14 @@ config X86_GLOBAL_STACKPROTECTOR
 
   If unsure, say N
 
+config X86_PIE
+   bool
+   depends on X86_64
+   select DEFAULT_HIDDEN_SYMS
+   select WEAK_PROVIDE_HIDDEN
+   select DYNAMIC_MODULE_BASE
+   select MODULE_REL_CRCS if MODVERSIONS
+
 config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5e9c1b02cf87..f72f78510059 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -60,6 +60,8 @@ endif
 KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
 KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
 
+stackglobal := $(call cc-option-yn, -mstack-protector-guard=global)
+
 ifeq ($(CONFIG_X86_32),y)
 BITS := 32
 UTS_MACHINE := i386
@@ -130,14 +132,55 @@ else
 
 KBUILD_CFLAGS += -mno-red-zone
 ifdef CONFIG_X86_PIE
+KBUILD_CFLAGS += -fPIE
 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/x86/kernel/module.lds
+
+# Relax relocation in both CFLAGS and LDFLAGS to support older 
compilers
+KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
+LDFLAGS_vmlinux += $(call ld-option,--no-relax)
+KBUILD_LDFLAGS_MODULE += $(call ld-option,--no-relax)
+
+# Stack validation is not yet supported due to self-referenced switches
+ifdef CONFIG_STACK_VALIDATION
+$(warning CONFIG_STACK_VALIDATION is not yet supported for x86_64 pie \
+   build.)
+SKIP_STACK_VALIDATION := 1
+export SKIP_STACK_VALIDATION
+endif
+
+ifndef CONFIG_CC_STACKPROTECTOR_NONE
+ifndef CONFIG_X86_GLOBAL_STACKPROTECTOR
+stackseg-flag := -mstack-protector-guard-reg=%gs
+ifeq ($(call cc-option-yn,$(stackseg-flag)),n)
+# Try to enable global stack cookie if possible
+ifeq ($(stackglobal), y)
+$(warning Cannot use CONFIG_CC_STACKPROTECTOR_* while \
+building a position independent kernel. \
+Default to global stack protector \
+(CONFIG_X86_GLOBAL_STACKPROTECTOR).)
+CONFIG_X86_GLOBAL_STACKPROTECTOR := y
+KBUILD_CFLAGS += -DCONFIG_X86_GLOBAL_STACKPROTECTOR
+KBUILD_AFLAGS += -DCONFIG_X86_GLOBAL_STACKPROTECTOR
+else
+$(error echo Cannot use \
+CONFIG_CC_STACKPROTECTOR_(REGULAR|STRONG|AUTO) 
\
+while building a position independent binary. \
+Update your compiler or use \
+CONFIG_X86_GLOBAL_STAC

[PATCH v6 24/27] x86/mm: Make the x86 GOT read-only

2019-01-31 Thread Thomas Garnier
The GOT is changed during early boot when relocations are applied. Make
it read-only directly. This table exists only for a PIE binary.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 include/asm-generic/vmlinux.lds.h | 12 
 1 file changed, 12 insertions(+)

diff --git a/include/asm-generic/vmlinux.lds.h 
b/include/asm-generic/vmlinux.lds.h
index 3d7a6a9c2370..0a038594c878 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -323,6 +323,17 @@
__end_ro_after_init = .;
 #endif
 
+#ifdef CONFIG_X86_PIE
+#define RO_GOT_X86 \
+   .got: AT(ADDR(.got) - LOAD_OFFSET) {\
+   __start_got = .;\
+   *(.got);\
+   __end_got = .;  \
+   }
+#else
+#define RO_GOT_X86
+#endif
+
 /*
  * Read only Data
  */
@@ -379,6 +390,7 @@
__end_builtin_fw = .;   \
}   \
\
+   RO_GOT_X86  \
TRACEDATA   \
\
/* Kernel symbol table: Normal symbols */   \
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 20/27] x86: Support global stack cookie

2019-01-31 Thread Thomas Garnier
Add an off-by-default configuration option to use a global stack cookie
instead of the default TLS. This configuration option will only be used
with PIE binaries.

For the kernel stack cookie, the compiler uses mcmodel=kernel to switch
from the fs segment to the gs segment. A PIE binary does not use
mcmodel=kernel because it can be relocated anywhere, therefore the
compiler will default to the fs segment register. This is fixed in the
latest version of gcc.

If the segment selector is available, it will be automatically added. If
the automatic configuration was selected, a warning is written and the
global variable stack cookie is used. If a specific stack mode was
selected (regular or strong) and the compiler does not support selecting
the segment register, an error is emitted.
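
As a rough sketch of what the two modes boil down to (assuming the usual
x86_64 canary offset and gcc's __stack_chk_guard symbol for the global
mode; illustrative only, not compiler output):

static inline unsigned long read_canary_tls(void)
{
        unsigned long canary;

        /* Segment-based: per-task canary behind %gs (offset 0x28 on x86_64). */
        asm("movq %%gs:0x28, %0" : "=r" (canary));
        return canary;
}

extern unsigned long __stack_chk_guard;         /* global mode symbol */

static inline unsigned long read_canary_global(void)
{
        return __stack_chk_guard;
}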

Signed-off-by: Thomas Garnier 
---
 arch/x86/Kconfig  | 12 
 arch/x86/Makefile |  9 +
 arch/x86/entry/entry_32.S |  3 ++-
 arch/x86/entry/entry_64.S |  3 ++-
 arch/x86/include/asm/processor.h  |  3 ++-
 arch/x86/include/asm/stackprotector.h | 19 ++-
 arch/x86/kernel/asm-offsets.c |  3 ++-
 arch/x86/kernel/asm-offsets_32.c  |  3 ++-
 arch/x86/kernel/asm-offsets_64.c  |  3 ++-
 arch/x86/kernel/cpu/common.c  |  3 ++-
 arch/x86/kernel/head_32.S |  3 ++-
 arch/x86/kernel/process.c |  5 +
 12 files changed, 56 insertions(+), 13 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0519da6f8ee4..263d81c570b2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2221,6 +2221,18 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 
   If unsure, leave at the default value.
 
+config X86_GLOBAL_STACKPROTECTOR
+   bool "Stack cookie using a global variable"
+   depends on CC_STACKPROTECTOR_AUTO
+   default n
+   help
+  This option turns on the "stack-protector" GCC feature using a global
+  variable instead of a segment register. It is useful when the
+  compiler does not support custom segment registers when building a
+  position independent (PIE) binary.
+
+  If unsure, say N
+
 config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 76bc4dc03d5e..65d6d9a1dd22 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -132,6 +132,15 @@ else
 KBUILD_CFLAGS += -mcmodel=kernel
 endif
 
+ifdef CONFIG_X86_GLOBAL_STACKPROTECTOR
+ifeq ($(call cc-option, -mstack-protector-guard=global),)
+$(error Cannot use CONFIG_X86_GLOBAL_STACKPROTECTOR: \
+-mstack-protector-guard=global not supported \
+by compiler)
+endif
+KBUILD_CFLAGS += -mstack-protector-guard=global
+endif
+
 ifdef CONFIG_X86_X32
x32_ld_ok := $(call try-run,\
/bin/echo -e '1: .quad 1b' | \
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d309f30cf7af..1a4abb98664b 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -655,7 +655,8 @@ ENTRY(__switch_to_asm)
movl%esp, TASK_threadsp(%eax)
movlTASK_threadsp(%edx), %esp
 
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && \
+   !defined(CONFIG_X86_GLOBAL_STACKPROTECTOR)
movlTASK_stack_canary(%edx), %ebx
movl%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index fc15fe058d3c..1ae9c85241dc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -296,7 +296,8 @@ ENTRY(__switch_to_asm)
movq%rsp, TASK_threadsp(%rdi)
movqTASK_threadsp(%rsi), %rsp
 
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && \
+   !defined(CONFIG_X86_GLOBAL_STACKPROTECTOR)
movqTASK_stack_canary(%rsi), %rbx
movq%rbx, PER_CPU_VAR(irq_stack_union + stack_canary_offset)
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 18f1e8269ad7..d322b1789d94 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -416,7 +416,8 @@ extern asmlinkage void ignore_sysret(void);
 void save_fsgs_for_kvm(void);
 #endif
 #else  /* X86_64 */
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && \
+   !defined(CONFIG_X86_GLOBAL_STACKPROTECTOR)
 /*
  * Make sure stack canary segment base is cached-aligned:
  *   "For Intel Atom processors, avoid non zero segment base address
diff --git a/arch/x86/include/asm/stackprotector.h 
b/arch/x86/include/asm/stackprotector.h
index 8ec97a62c245..4e120cf36782 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackpr

[PATCH v6 21/27] x86/ftrace: Adapt function tracing for PIE support

2019-01-31 Thread Thomas Garnier
When using PIE with function tracing, the compiler generates a
call through the GOT (call *__fentry__@GOTPCREL). This instruction
takes 6 bytes instead of the 5 bytes of a relative call.

If PIE is enabled, replace the 6th byte of the GOT call with a 1-byte nop
so ftrace can handle the first 5 bytes as before.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.
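
For illustration, a stand-alone sketch of the byte handling done in
ftrace_modify_initial_code() below. MCOUNT_INSN_SIZE and the 0x90
one-byte nop value are assumptions matching the usual x86 definitions:

#include <stdio.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5	/* ftrace works on 5-byte slots */

int main(void)
{
	/* 5-byte replacement (e8 = relative call, displacement elided) */
	unsigned char new_code[MCOUNT_INSN_SIZE] = { 0xe8, 0, 0, 0, 0 };
	/* 6-byte slot that used to hold "ff 15 <disp32>" (call via GOT) */
	unsigned char replaced[MCOUNT_INSN_SIZE + 1];

	/* Same idea as the patch: fill with 1-byte nops, then copy the
	 * 5-byte code over the front so byte 6 stays a nop. */
	memset(replaced, 0x90, sizeof(replaced));
	memcpy(replaced, new_code, MCOUNT_INSN_SIZE);

	for (unsigned int i = 0; i < sizeof(replaced); i++)
		printf("%02x ", replaced[i]);
	printf("\n");	/* e8 00 00 00 00 90 */
	return 0;
}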

Signed-off-by: Thomas Garnier 
Reviewed-by: Steven Rostedt (VMware) 
---
 arch/x86/kernel/ftrace.c | 51 --
 scripts/recordmcount.c   | 78 ++--
 2 files changed, 101 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8257a59704ae..82feb8c7a47e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -102,7 +102,7 @@ static const unsigned char *ftrace_nop_replace(void)
 
 static int
 ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
-  unsigned const char *new_code)
+ unsigned const char *new_code)
 {
unsigned char replaced[MCOUNT_INSN_SIZE];
 
@@ -135,6 +135,53 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const 
char *old_code,
return 0;
 }
 
+/* Bytes before call GOT offset */
+static const unsigned char got_call_preinsn[] = { 0xff, 0x15 };
+
+static int
+ftrace_modify_initial_code(unsigned long ip, unsigned const char *old_code,
+  unsigned const char *new_code)
+{
+   unsigned char replaced[MCOUNT_INSN_SIZE + 1];
+
+   /*
+* If PIE is not enabled default to the original approach to code
+* modification.
+*/
+   if (!IS_ENABLED(CONFIG_X86_PIE))
+   return ftrace_modify_code_direct(ip, old_code, new_code);
+
+   ftrace_expected = old_code;
+
+   /* Ensure the instructions point to a call to the GOT */
+   if (probe_kernel_read(replaced, (void *)ip, sizeof(replaced))) {
+   WARN_ONCE(1, "invalid function");
+   return -EFAULT;
+   }
+
+   if (memcmp(replaced, got_call_preinsn, sizeof(got_call_preinsn))) {
+   WARN_ONCE(1, "invalid function call");
+   return -EINVAL;
+   }
+
+   /*
+* Build a nop slide with a 5-byte nop and 1-byte nop to keep the ftrace
+* hooking algorithm working with the expected 5 bytes instruction.
+*/
+   memset(replaced, ideal_nops[1][0], sizeof(replaced));
+   memcpy(replaced, new_code, MCOUNT_INSN_SIZE);
+
+   ip = text_ip_addr(ip);
+
+   if (probe_kernel_write((void *)ip, replaced, sizeof(replaced)))
+   return -EPERM;
+
+   sync_core();
+
+   return 0;
+
+}
+
 int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -153,7 +200,7 @@ int ftrace_make_nop(struct module *mod,
 * just modify the code directly.
 */
if (addr == MCOUNT_ADDR)
-   return ftrace_modify_code_direct(rec->ip, old, new);
+   return ftrace_modify_initial_code(rec->ip, old, new);
 
ftrace_expected = NULL;
 
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index a50a2aa963ad..4b8bd746ed2e 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -171,33 +171,9 @@ umalloc(size_t size)
return addr;
 }
 
-static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
-static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
-static unsigned char *ideal_nop;
-
 static char rel_type_nop;
-
 static int (*make_nop)(void *map, size_t const offset);
-
-static int make_nop_x86(void *map, size_t const offset)
-{
-   uint32_t *ptr;
-   unsigned char *op;
-
-   /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */
-   ptr = map + offset;
-   if (*ptr != 0)
-   return -1;
-
-   op = map + offset - 1;
-   if (*op != 0xe8)
-   return -1;
-
-   /* convert to nop */
-   ulseek(fd_map, offset - 1, SEEK_SET);
-   uwrite(fd_map, ideal_nop, 5);
-   return 0;
-}
+static unsigned char *ideal_nop;
 
 static unsigned char ideal_nop4_arm_le[4] = { 0x00, 0x00, 0xa0, 0xe1 }; /* mov 
r0, r0 */
 static unsigned char ideal_nop4_arm_be[4] = { 0xe1, 0xa0, 0x00, 0x00 }; /* mov 
r0, r0 */
@@ -447,6 +423,49 @@ static void MIPS64_r_info(Elf64_Rel *const rp, unsigned 
sym, unsigned type)
}).r_info;
 }
 
+static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
+static unsigned char ideal_nop6_x86_64[6] = { 0x66, 0x0f, 0x1f, 0x44, 0x00, 
0x00 };
+static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+static size_t ideal_nop_x86_size;
+
+static unsigned char stub_default_x86[2] = { 0xe8, 0x00 };   /* call relative 
*/
+static unsigned char stub_got_x86[3] = { 0xff, 0x15, 0x00 }; /* call .got */
+stat

[PATCH v6 22/27] x86/modules: Add option to start module section after kernel

2019-01-31 Thread Thomas Garnier
Add an option so the module section is placed just after the mapped
kernel. It ensures position independent modules are always at the right
distance from the kernel and do not require mcmodel=large. It also
optimizes the available size for modules by getting rid of the empty
space in the kernel randomization range.
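
A quick user-space sketch of the base computation added in
pgtable_64_types.h. The _end value is a made-up placeholder;
PAGE_SIZE and PMD_SIZE use their usual x86-64 values:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(1UL << 21)		/* 2MB */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long end = 0xffffffff82345678UL;	/* pretend _end */
	unsigned long modules_vaddr = ALIGN(end + PAGE_SIZE, PMD_SIZE);

	/* Module area begins at the first 2MB boundary past the kernel. */
	printf("MODULES_VADDR = %#lx\n", modules_vaddr);
	return 0;
}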

Signed-off-by: Thomas Garnier 
---
 Documentation/x86/x86_64/mm.txt | 3 +++
 arch/x86/Kconfig| 4 
 arch/x86/include/asm/pgtable_64_types.h | 6 ++
 arch/x86/kernel/head64.c| 5 -
 arch/x86/mm/dump_pagetables.c   | 3 ++-
 5 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 804f9426ed17..35b845d695d5 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -151,3 +151,6 @@ correct as KASAN disables KASLR.
 
 For both 4- and 5-level layouts, the STACKLEAK_POISON value in the last 2MB
 hole: 4111
+
+If CONFIG_DYNAMIC_MODULE_BASE is enabled, the module section follows the end of
+the mapped kernel.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 263d81c570b2..c3ad1b0ae1a1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2221,6 +2221,10 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 
   If unsure, leave at the default value.
 
+# Module section starts just after the end of the kernel module
+config DYNAMIC_MODULE_BASE
+   bool
+
 config X86_GLOBAL_STACKPROTECTOR
bool "Stack cookie using a global variable"
depends on CC_STACKPROTECTOR_AUTO
diff --git a/arch/x86/include/asm/pgtable_64_types.h 
b/arch/x86/include/asm/pgtable_64_types.h
index 88bca456da99..d1bb676ec376 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -7,6 +7,7 @@
 #ifndef __ASSEMBLY__
 #include 
 #include 
+#include 
 
 /*
  * These are used to make use of C type-checking..
@@ -141,7 +142,12 @@ extern unsigned int ptrs_per_p4d;
 
 #define VMALLOC_END(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
 
+#ifdef CONFIG_DYNAMIC_MODULE_BASE
+#define MODULES_VADDR  ALIGN(((unsigned long)_end + PAGE_SIZE), 
PMD_SIZE)
+#else
 #define MODULES_VADDR  (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+#endif
+
 /* The module sections ends with the start of the fixmap */
 #define MODULES_END_AC(0xff00, UL)
 #define MODULES_LEN(MODULES_END - MODULES_VADDR)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 22e81275495b..ca2f6ff431af 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -411,12 +411,15 @@ asmlinkage __visible void __init x86_64_start_kernel(char 
* real_mode_data)
 * Build-time sanity checks on the kernel image and module
 * area mappings. (these are purely build-time and produce no code)
 */
+#ifndef CONFIG_DYNAMIC_MODULE_BASE
BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
-   BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
+   BUILD_BUG_ON(!IS_ENABLED(CONFIG_RANDOMIZE_BASE_LARGE) &&
+MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+#endif
MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
(__START_KERNEL & PGDIR_MASK)));
BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index e3cdc85ce5b6..3172bd968215 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -104,7 +104,7 @@ static struct addr_marker address_markers[] = {
[EFI_END_NR]= { EFI_VA_END, "EFI Runtime Services" 
},
 #endif
[HIGH_KERNEL_NR]= { __START_KERNEL_map, "High Kernel Mapping" },
-   [MODULES_VADDR_NR]  = { MODULES_VADDR,  "Modules" },
+   [MODULES_VADDR_NR]  = { 0/*MODULES_VADDR*/, "Modules" },
[MODULES_END_NR]= { MODULES_END,"End Modules" },
[FIXADDR_START_NR]  = { FIXADDR_START,  "Fixmap Area" },
[END_OF_SPACE_NR]   = { -1, NULL }
@@ -623,6 +623,7 @@ static int __init pt_dump_init(void)
address_markers[KASAN_SHADOW_START_NR].start_address = 
KASAN_SHADOW_START;
address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
 #endif
+   address_markers[MODULES_VADDR_NR].start_address = MODULES_VADDR;
 #endif
 #ifdef CONFIG_X86_32
address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 26/27] x86/relocs: Add option to generate 64-bit relocations

2019-01-31 Thread Thomas Garnier
The x86 relocation tool generates a list of 32-bit signed integers. There
was no need for 64-bit integers because all addresses were above the top
2G of the memory layout.

This change adds a large-reloc option to generate 64-bit unsigned integers.
It can be used when the kernel is expected to go below the top 2G and
32-bit integers are no longer enough.
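
The limit this works around is the existing
"(int32_t)offset != (int64_t)offset" check in relocs.c; a small
stand-alone illustration (both sample addresses are made up):

#include <stdint.h>
#include <stdio.h>

static int fits_in_int32(uint64_t offset)
{
	/* Same round-trip test relocs.c uses before emitting an entry. */
	return (int64_t)(int32_t)offset == (int64_t)offset;
}

int main(void)
{
	uint64_t top2g = 0xffffffff81000000ULL; /* classic -2GB placement */
	uint64_t lower = 0xffffff0000000000ULL; /* hypothetical lower base */

	printf("top-2G address fits in 32 bits: %d\n", fits_in_int32(top2g));
	printf("lower address fits in 32 bits:  %d\n", fits_in_int32(lower));
	return 0;
}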

Signed-off-by: Thomas Garnier 
---
 arch/x86/tools/relocs.c| 61 +++---
 arch/x86/tools/relocs.h|  4 +--
 arch/x86/tools/relocs_common.c | 15 ++---
 3 files changed, 61 insertions(+), 19 deletions(-)

diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 1b5ee38446b6..b4169eed37ab 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -13,8 +13,14 @@
 
 static Elf_Ehdr ehdr;
 
+#if ELF_BITS == 64
+typedef uint64_t rel_off_t;
+#else
+typedef uint32_t rel_off_t;
+#endif
+
 struct relocs {
-   uint32_t*offset;
+   rel_off_t   *offset;
unsigned long   count;
unsigned long   size;
 };
@@ -690,7 +696,7 @@ static void print_absolute_relocs(void)
printf("\n");
 }
 
-static void add_reloc(struct relocs *r, uint32_t offset)
+static void add_reloc(struct relocs *r, rel_off_t offset)
 {
if (r->count == r->size) {
unsigned long newsize = r->size + 5;
@@ -1075,26 +1081,49 @@ static void sort_relocs(struct relocs *r)
qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
 }
 
-static int write32(uint32_t v, FILE *f)
+static int write32(rel_off_t rel, FILE *f)
 {
-   unsigned char buf[4];
+   unsigned char buf[sizeof(uint32_t)];
+   uint32_t v = (uint32_t)rel;
 
put_unaligned_le32(v, buf);
-   return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
+   return fwrite(buf, 1, sizeof(buf), f) == sizeof(buf) ? 0 : -1;
 }
 
-static int write32_as_text(uint32_t v, FILE *f)
+static int write32_as_text(rel_off_t rel, FILE *f)
 {
+   uint32_t v = (uint32_t)rel;
return fprintf(f, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1;
 }
 
-static void emit_relocs(int as_text, int use_real_mode)
+static int write64(rel_off_t rel, FILE *f)
+{
+   unsigned char buf[sizeof(uint64_t)];
+   uint64_t v = (uint64_t)rel;
+
+   put_unaligned_le64(v, buf);
+   return fwrite(buf, 1, sizeof(buf), f) == sizeof(buf) ? 0 : -1;
+}
+
+static int write64_as_text(rel_off_t rel, FILE *f)
+{
+   uint64_t v = (uint64_t)rel;
+
+   return fprintf(f, "\t.quad 0x%016"PRIx64"\n", v) > 0 ? 0 : -1;
+}
+
+static void emit_relocs(int as_text, int use_real_mode, int use_large_reloc)
 {
int i;
-   int (*write_reloc)(uint32_t, FILE *) = write32;
+   int (*write_reloc)(rel_off_t rel, FILE *f);
int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
const char *symname);
 
+   if (use_large_reloc)
+   write_reloc = write64;
+   else
+   write_reloc = write32;
+
 #if ELF_BITS == 64
if (!use_real_mode)
do_reloc = do_reloc64;
@@ -1105,6 +1134,9 @@ static void emit_relocs(int as_text, int use_real_mode)
do_reloc = do_reloc32;
else
do_reloc = do_reloc_real;
+
+   /* Large relocations only for 64-bit */
+   use_large_reloc = 0;
 #endif
 
/* Collect up the relocations */
@@ -1128,8 +1160,13 @@ static void emit_relocs(int as_text, int use_real_mode)
 * gas will like.
 */
printf(".section \".data.reloc\",\"a\"\n");
-   printf(".balign 4\n");
-   write_reloc = write32_as_text;
+   if (use_large_reloc) {
+   printf(".balign 8\n");
+   write_reloc = write64_as_text;
+   } else {
+   printf(".balign 4\n");
+   write_reloc = write32_as_text;
+   }
}
 
if (use_real_mode) {
@@ -1197,7 +1234,7 @@ static void print_reloc_info(void)
 
 void process(FILE *fp, int use_real_mode, int as_text,
 int show_absolute_syms, int show_absolute_relocs,
-int show_reloc_info)
+int show_reloc_info, int use_large_reloc)
 {
regex_init(use_real_mode);
read_ehdr(fp);
@@ -1220,5 +1257,5 @@ void process(FILE *fp, int use_real_mode, int as_text,
print_reloc_info();
return;
}
-   emit_relocs(as_text, use_real_mode);
+   emit_relocs(as_text, use_real_mode, use_large_reloc);
 }
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
index 43c83c0fd22c..3d401da59df7 100644
--- a/arch/x86/tools/relocs.h
+++ b/arch/x86/tools/relocs.h
@@ -31,8 +31,8 @@ enum symtype {
 
 void process_32(FILE *fp, int use_real_mode, int as_text,
int show_absolute_

[PATCH v6 23/27] x86/modules: Adapt module loading for PIE support

2019-01-31 Thread Thomas Garnier
Adapt module loading to support PIE relocations. Generate dynamic GOT if
a symbol requires it but no entry exists in the kernel GOT.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/Makefile   |   4 +
 arch/x86/include/asm/module.h   |  11 ++
 arch/x86/include/asm/sections.h |   4 +
 arch/x86/kernel/module.c| 181 +++-
 arch/x86/kernel/module.lds  |   3 +
 5 files changed, 198 insertions(+), 5 deletions(-)
 create mode 100644 arch/x86/kernel/module.lds

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 65d6d9a1dd22..5e9c1b02cf87 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -129,8 +129,12 @@ else
 KBUILD_CFLAGS += $(cflags-y)
 
 KBUILD_CFLAGS += -mno-red-zone
+ifdef CONFIG_X86_PIE
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/x86/kernel/module.lds
+else
 KBUILD_CFLAGS += -mcmodel=kernel
 endif
+endif
 
 ifdef CONFIG_X86_GLOBAL_STACKPROTECTOR
 ifeq ($(call cc-option, -mstack-protector-guard=global),)
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..68ff05e14288 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,12 +5,23 @@
 #include 
 #include 
 
+#ifdef CONFIG_X86_PIE
+struct mod_got_sec {
+   struct elf64_shdr   *got;
+   int got_num_entries;
+   int got_max_entries;
+};
+#endif
+
 struct mod_arch_specific {
 #ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
 #endif
+#ifdef CONFIG_X86_PIE
+   struct mod_got_sec  core;
+#endif
 };
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 8ea1cfdbeabc..d4468309d743 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -13,4 +13,8 @@ extern char __end_rodata_aligned[];
 extern char __end_rodata_hpage_align[];
 #endif
 
+#if defined(CONFIG_X86_PIE)
+extern char __start_got[], __end_got[];
+#endif
+
 #endif /* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index b052e883dd8c..f0d3ed92049e 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -77,6 +78,173 @@ static unsigned long int get_module_load_offset(void)
 }
 #endif
 
+#ifdef CONFIG_X86_PIE
+static u64 find_got_kernel_entry(Elf64_Sym *sym, const Elf64_Rela *rela)
+{
+   u64 *pos;
+
+   for (pos = (u64 *)__start_got; pos < (u64 *)__end_got; pos++) {
+   if (*pos == sym->st_value)
+   return (u64)pos + rela->r_addend;
+   }
+
+   return 0;
+}
+
+static u64 module_emit_got_entry(struct module *mod, void *loc,
+const Elf64_Rela *rela, Elf64_Sym *sym)
+{
+   struct mod_got_sec *gotsec = &mod->arch.core;
+   u64 *got = (u64 *)gotsec->got->sh_addr;
+   int i = gotsec->got_num_entries;
+   u64 ret;
+
+   /* Check if we can use the kernel GOT */
+   ret = find_got_kernel_entry(sym, rela);
+   if (ret)
+   return ret;
+
+   got[i] = sym->st_value;
+
+   /*
+* Check if the entry we just created is a duplicate. Given that the
+* relocations are sorted, this will be the last entry we allocated.
+* (if one exists).
+*/
+   if (i > 0 && got[i] == got[i - 1]) {
+   ret = (u64)&got[i - 1];
+   } else {
+   gotsec->got_num_entries++;
+   BUG_ON(gotsec->got_num_entries > gotsec->got_max_entries);
+   ret = (u64)&got[i];
+   }
+
+   return ret + rela->r_addend;
+}
+
+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+   const Elf64_Rela *x = a, *y = b;
+   int i;
+
+   /* sort by type, symbol index and addend */
+   i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+   if (i == 0)
+   i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+   if (i == 0)
+   i = cmp_3way(x->r_addend, y->r_addend);
+   return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+   /*
+* Entries are sorted by type, symbol index and addend. That means
+* that, if a duplicate entry exists, it must be in the preceding
+* slot.
+*/
+   return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_gots(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+{
+   unsigned int ret = 0;
+   Elf64_Sym *s;
+   int i;
+
+   for (i = 0; i < num; i++) {
+ 

[PATCH v6 27/27] x86/kaslr: Add option to extend KASLR range from 1GB to 3GB

2019-01-31 Thread Thomas Garnier
Add a new CONFIG_RANDOMIZE_BASE_LARGE option to benefit from PIE
support. It increases the KASLR range from 1GB to 3GB. The new range
starts at 0x just above the EFI memory region. This
option is off by default.

The boot code is adapted to create the appropriate page table spanning
three PUD pages.

The relocation table uses 64-bit integers generated with the updated
relocation tool with the large-reloc option.

Signed-off-by: Thomas Garnier 
---
 Makefile |  3 +++
 arch/x86/Kconfig | 21 +
 arch/x86/boot/compressed/Makefile|  5 +
 arch/x86/boot/compressed/misc.c  | 10 +-
 arch/x86/include/asm/page_64_types.h | 10 ++
 arch/x86/kernel/head64.c | 15 ---
 arch/x86/kernel/head_64.S| 11 ++-
 7 files changed, 70 insertions(+), 5 deletions(-)

diff --git a/Makefile b/Makefile
index 6e4f0dba45bb..41e0aa0c06b0 100644
--- a/Makefile
+++ b/Makefile
@@ -1106,6 +1106,8 @@ genheader:
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
 ifeq ($(SKIP_STACK_VALIDATION),1)
+# CONFIG_STACK_VALIDATION is not yet support by CONFIG_X86_PIE and warning is 
displayed before.
+ifndef CONFIG_X86_PIE
 ifdef CONFIG_UNWINDER_ORC
@echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, 
please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
@false
@@ -1113,6 +1115,7 @@ else
@echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install 
libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
 endif
 endif
+endif
 
 # Generate some files
 # ---
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e4316b8ed130..e61e4fafa1a0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2245,6 +2245,27 @@ config X86_PIE
select DYNAMIC_MODULE_BASE
select MODULE_REL_CRCS if MODVERSIONS
 
+config RANDOMIZE_BASE_LARGE
+   bool "Increase the randomization range of the kernel image"
+   depends on X86_64 && RANDOMIZE_BASE
+   select X86_PIE
+   select X86_MODULE_PLTS if MODULES
+   default n
+   help
+ Build the kernel as a Position Independent Executable (PIE) and
+ increase the available randomization range from 1GB to 3GB.
+
+ This option impacts performance on kernel CPU intensive workloads up
+ to 10% due to PIE generated code. Impact on user-mode processes and
+ typical usage would be significantly less (0.50% when you build the
+ kernel).
+
+ The kernel and modules will generate slightly more assembly (1 to 2%
+ increase on the .text sections). The vmlinux binary will be
+ significantly smaller due to fewer relocations.
+
+ If unsure say N
+
 config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
diff --git a/arch/x86/boot/compressed/Makefile 
b/arch/x86/boot/compressed/Makefile
index f0515ac895a4..02d1ba4877a0 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -121,7 +121,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 
 targets += $(patsubst $(obj)/%,%,$(vmlinux-objs-y)) vmlinux.bin.all 
vmlinux.relocs
 
+# Large randomization require bigger relocation table
+ifeq ($(CONFIG_RANDOMIZE_BASE_LARGE),y)
+CMD_RELOCS = arch/x86/tools/relocs --large-reloc
+else
 CMD_RELOCS = arch/x86/tools/relocs
+endif
 quiet_cmd_relocs = RELOCS  $@
   cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
 $(obj)/vmlinux.relocs: vmlinux FORCE
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 8dd1d5ccae58..28d17bd5bad8 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -171,10 +171,18 @@ void __puthex(unsigned long value)
 }
 
 #if CONFIG_X86_NEED_RELOCS
+
+/* Large randomization go lower than -2G and use large relocation table */
+#ifdef CONFIG_RANDOMIZE_BASE_LARGE
+typedef long rel_t;
+#else
+typedef int rel_t;
+#endif
+
 static void handle_relocations(void *output, unsigned long output_len,
   unsigned long virt_addr)
 {
-   int *reloc;
+   rel_t *reloc;
unsigned long delta, map, ptr;
unsigned long min_addr = (unsigned long)output;
unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
diff --git a/arch/x86/include/asm/page_64_types.h 
b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..acd4f3b400ca 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,7 +48,11 @@
 #define __PAGE_OFFSET   __PAGE_OFFSET_BASE_L4
 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
+#ifdef CONFIG_RANDOMIZE_BASE_LARGE
+#define __START_KERNEL_map _AC(0x, UL)
+#else
 #define __START_KERNEL_map _AC(0x8000

[PATCH v6 18/27] xen: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use the new _ASM_MOVABS macro, which gets a
symbol reference while being PIE compatible. Adapt the relocation tool
to ignore 32-bit Xen code.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
Reviewed-by: Juergen Gross 
---
 arch/x86/platform/pvh/head.S | 14 ++
 arch/x86/tools/relocs.c  | 16 +++-
 arch/x86/xen/xen-head.S  | 11 ++-
 3 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..e52d8b31e01d 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -103,8 +103,8 @@ ENTRY(pvh_start_xen)
call xen_prepare_pvh
 
/* startup_64 expects boot_params in %rsi. */
-   mov $_pa(pvh_bootparams), %rsi
-   mov $_pa(startup_64), %rax
+   movabs $_pa(pvh_bootparams), %rsi
+   movabs $_pa(startup_64), %rax
jmp *%rax
 
 #else /* CONFIG_X86_64 */
@@ -150,10 +150,16 @@ END(pvh_start_xen)
 
.section ".init.data","aw"
.balign 8
+   /*
+* Use an ASM_PTR (quad on x64) for _pa(gdt_start) because PIE requires
+* a pointer size storage value before applying the relocation. On
+* 32-bit _ASM_PTR will be a long which is aligned the space needed for
+* relocation.
+*/
 gdt:
.word gdt_end - gdt_start
-   .long _pa(gdt_start)
-   .word 0
+   _ASM_PTR _pa(gdt_start)
+   .balign 8
 gdt_start:
.quad 0x/* NULL descriptor */
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 2a3c703218cc..1b5ee38446b6 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -837,6 +837,16 @@ static int is_percpu_sym(ElfW(Sym) *sym, const char 
*symname)
strncmp(symname, "init_per_cpu_", 13);
 }
 
+/*
+ * Check if the 32-bit relocation is within the xenpvh 32-bit code.
+ * If so, ignores it.
+ */
+static int is_in_xenpvh_assembly(Elf_Addr offset)
+{
+   Elf_Sym *sym = sym_lookup("pvh_start_xen");
+   return sym && (offset >= sym->st_value) &&
+   (offset < (sym->st_value + sym->st_size));
+}
 
 static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
  const char *symname)
@@ -909,8 +919,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, 
ElfW(Sym) *sym,
 * the relocations are processed.
 * Make sure that the offset will fit.
 */
-   if (r_type != R_X86_64_64 && (int32_t)offset != (int64_t)offset)
+   if (r_type != R_X86_64_64 &&
+   (int32_t)offset != (int64_t)offset) {
+   if (is_in_xenpvh_assembly(offset))
+   break;
die("Relocation offset doesn't fit in 32 bits\n");
+   }
 
if (r_type == R_X86_64_64)
add_reloc(&relocs64, offset);
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 5077ead5e59c..4418ff0a1d96 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -28,14 +28,15 @@ ENTRY(startup_xen)
 
/* Clear .bss */
xor %eax,%eax
-   mov $__bss_start, %_ASM_DI
-   mov $__bss_stop, %_ASM_CX
+   _ASM_MOVABS $__bss_start, %_ASM_DI
+   _ASM_MOVABS $__bss_stop, %_ASM_CX
sub %_ASM_DI, %_ASM_CX
shr $__ASM_SEL(2, 3), %_ASM_CX
rep __ASM_SIZE(stos)
 
-   mov %_ASM_SI, xen_start_info
-   mov $init_thread_union+THREAD_SIZE, %_ASM_SP
+   _ASM_MOVABS $xen_start_info, %_ASM_AX
+   _ASM_MOV %_ASM_SI, (%_ASM_AX)
+   _ASM_MOVABS $init_thread_union+THREAD_SIZE, %_ASM_SP
 
 #ifdef CONFIG_X86_64
/* Set up %gs.
@@ -46,7 +47,7 @@ ENTRY(startup_xen)
 * init data section till per cpu areas are set up.
 */
movl$MSR_GS_BASE,%ecx
-   movq$INIT_PER_CPU_VAR(irq_stack_union),%rax
+   movabsq $INIT_PER_CPU_VAR(irq_stack_union),%rax
cdq
wrmsr
 #endif
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 16/27] compiler: Option to add PROVIDE_HIDDEN replacement for weak symbols

2019-01-31 Thread Thomas Garnier
Provide an option to have a PROVIDE_HIDDEN (linker script) entry for
each weak symbol. This option solves an error in x86_64 where the linker
optimizes PIE generated code to be non-PIE because --emit-relocs was used
instead of -pie (to reduce dynamic relocations).
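
A rough C rendering of what the nm | sed pipeline added to
link-vmlinux.sh produces; the symbol names are invented, and the real
script matches nm's " w <sym>" weak-symbol lines:

#include <stdio.h>

int main(void)
{
	/* Pretend nm output: one weak symbol, one regular one. */
	const char *nm_lines[] = {
		"                 w __example_weak_hook",
		"ffffffff81000000 T _stext",
	};
	char sym[128];

	printf("SECTIONS {\n. = _end;\n");
	for (unsigned int i = 0; i < 2; i++) {
		/* Only " w <name>" lines become PROVIDE_HIDDEN entries. */
		if (sscanf(nm_lines[i], " w %127s", sym) == 1)
			printf("PROVIDE_HIDDEN(%s = .);\n", sym);
	}
	printf("}\n");
	return 0;
}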

Signed-off-by: Thomas Garnier 
---
 init/Kconfig|  7 +++
 scripts/link-vmlinux.sh | 14 ++
 2 files changed, 21 insertions(+)

diff --git a/init/Kconfig b/init/Kconfig
index 116e0de4817f..2261c9891631 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2104,6 +2104,13 @@ config ASN1
  inform it as to what tags are to be expected in a stream and what
  functions to call on what tags.
 
+config WEAK_PROVIDE_HIDDEN
+   bool
+   help
+ Generate linker script PROVIDE_HIDDEN entries for all weak symbols. It
+ allows to prevent non-PIE code being replaced by the linker if the
+ emit-relocs option is used instead of PIE (useful for x86_64 PIE).
+
 source "kernel/Kconfig.locks"
 
 config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index bc7f1fc1f55b..abf44a804c79 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -119,6 +119,17 @@ kallsyms()
${CC} ${aflags} -c -o ${2} ${afile}
 }
 
+gen_weak_provide_hidden()
+{
+if [ -n "${CONFIG_WEAK_PROVIDE_HIDDEN}" ]; then
+local pattern="s/^\s\+ w \(\w\+\)$/PROVIDE_HIDDEN(\1 = .);/gp"
+echo -e "SECTIONS {\n. = _end;" > .tmp_vmlinux_hiddenld
+${NM} ${1} | sed -n "${pattern}" >> .tmp_vmlinux_hiddenld
+echo "}" >> .tmp_vmlinux_hiddenld
+LDFLAGS_vmlinux="${LDFLAGS_vmlinux} -T .tmp_vmlinux_hiddenld"
+fi
+}
+
 # Create map file with all symbols from ${1}
 # See mksymap for additional details
 mksysmap()
@@ -200,6 +211,9 @@ modpost_link vmlinux.o
 # modpost vmlinux.o to check for section mismatches
 ${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
 
+# Generate weak linker script
+gen_weak_provide_hidden vmlinux.o
+
 kallsymso=""
 kallsyms_vmlinux=""
 if [ -n "${CONFIG_KALLSYMS}" ]; then
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 19/27] kvm: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible. The new _ASM_MOVABS macro is used to
get the address of a symbol on both 32 and 64-bit with PIE support.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/kvm_host.h | 8 ++--
 arch/x86/kernel/kvm.c   | 6 --
 arch/x86/kvm/svm.c  | 4 ++--
 arch/x86/kvm/vmx/vmx.c  | 2 +-
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..fdb3307d5fe1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1498,9 +1498,13 @@ asmlinkage void kvm_spurious_fault(void);
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
cleanup_insn "\n\t"   \
-   "cmpb $0, kvm_rebooting \n\t" \
+   "cmpb $0, kvm_rebooting" __ASM_SEL(, (%%rip)) " \n\t" \
"jne 668b \n\t"   \
-   __ASM_SIZE(push) " $666b \n\t"\
+   __ASM_SIZE(push) "$0 \n\t"  \
+   __ASM_SIZE(push) "%%" _ASM_AX " \n\t"   \
+   _ASM_MOVABS " $666b, %%" _ASM_AX "\n\t" \
+   _ASM_MOV " %%" _ASM_AX ", " __ASM_SEL(4, 8) "(%%" _ASM_SP ") \n\t" \
+   __ASM_SIZE(pop) "%%" _ASM_AX " \n\t"\
"jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5c93a65ee1e5..f6eb02004e43 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -826,8 +826,10 @@ asm(
 ".global __raw_callee_save___kvm_vcpu_is_preempted;"
 ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
 "__raw_callee_save___kvm_vcpu_is_preempted:"
-"movq  __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb  $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"leaq  __per_cpu_offset(%rip), %rax;"
+"movq  (%rax,%rdi,8), %rax;"
+"addq  " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rip), %rax;"
+"cmpb  $0, (%rax);"
 "setne %al;"
 "ret;"
 ".popsection");
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f13a3a24d360..26abb82b1b67 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -706,12 +706,12 @@ static u32 svm_msrpm_offset(u32 msr)
 
 static inline void clgi(void)
 {
-   asm volatile (__ex("clgi"));
+   asm volatile (__ex("clgi") : :);
 }
 
 static inline void stgi(void)
 {
-   asm volatile (__ex("stgi"));
+   asm volatile (__ex("stgi") : :);
 }
 
 static inline void invlpga(unsigned long addr, u32 asid)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4341175339f3..3275761a7375 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2161,7 +2161,7 @@ static void vmclear_local_loaded_vmcss(void)
  */
 static void kvm_cpu_vmxoff(void)
 {
-   asm volatile (__ex("vmxoff"));
+   asm volatile (__ex("vmxoff") :::);
 
intel_pt_handle_vmx(0);
cr4_clear_bits(X86_CR4_VMXE);
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 12/27] x86/alternatives: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly options to work with pointers instead of integers.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/alternative.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h 
b/arch/x86/include/asm/alternative.h
index 4c74073a19cc..f5d6fe1d294b 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -233,7 +233,7 @@ static inline int alternatives_text_reserved(void *start, 
void *end)
 /* Like alternative_io, but for replacing a direct call with another one. */
 #define alternative_call(oldfunc, newfunc, feature, output, input...)  \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
-   : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+   : output : [old] "X" (oldfunc), [new] "X" (newfunc), ## input)
 
 /*
  * Like alternative_call, but there are two features and respective functions.
@@ -246,8 +246,8 @@ static inline int alternatives_text_reserved(void *start, 
void *end)
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2)\
: output, ASM_CALL_CONSTRAINT \
-   : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
- [new2] "i" (newfunc2), ## input)
+   : [old] "X" (oldfunc), [new1] "X" (newfunc1), \
+ [new2] "X" (newfunc2), ## input)
 
 /*
  * use this macro(s) if you need more than one output parameter
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 13/27] x86/boot/64: Build head64.c as mcmodel large when PIE is enabled

2019-01-31 Thread Thomas Garnier
The __startup_64 function assumes all symbols have relocated addresses
instead of the current boot virtual address. PIE generated code favors
relative addresses, making all virtual and physical address math incorrect.
If PIE is enabled, build head64.c with mcmodel=large instead to ensure
absolute references on all memory accesses. Add a global __force_order
variable required when using a large model with the read_cr* functions.

To build head64.c as mcmodel=large, disable the retpoline gcc flags.
This code is used at early boot and removed later, so it doesn't need
retpoline mitigation.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.
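
For context, a sketch of the read_cr* pattern that pulls in
__force_order, modelled on the older special_insns.h helpers. Treat it
as an assumption about the surrounding code (and as kernel-context
only), not as part of this patch:

/* The dummy "=m" output orders CR accesses without a full memory
 * clobber, but it makes __force_order a link-time dependency, hence
 * the global definition added by this patch. */
unsigned long __force_order;

static inline unsigned long sketch_read_cr3(void)
{
	unsigned long val;

	asm volatile("mov %%cr3, %0" : "=r" (val), "=m" (__force_order));
	return val;
}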

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/Makefile | 6 ++
 arch/x86/kernel/head64.c | 3 +++
 2 files changed, 9 insertions(+)

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 00b7e27bc2b7..1f98f52eab9f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -22,6 +22,12 @@ CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
 endif
 
+ifdef CONFIG_X86_PIE
+# Remove PIE and retpoline flags that are incompatible with mcmodel=large
+CFLAGS_REMOVE_head64.o += -fPIE -mindirect-branch=thunk-extern 
-mindirect-branch-register
+CFLAGS_head64.o = -mcmodel=large
+endif
+
 KASAN_SANITIZE_head$(BITS).o   := n
 KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 16b1cbd3a61e..22e81275495b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -63,6 +63,9 @@ EXPORT_SYMBOL(vmemmap_base);
 
 #define __head __section(.head.text)
 
+/* Required for read_cr3 when building as PIE */
+unsigned long __force_order;
+
 static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
 {
return ptr - (void *)_text + (void *)physaddr;
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 14/27] x86/percpu: Adapt percpu for PIE support

2019-01-31 Thread Thomas Garnier
Percpu uses a clever design where the .percpu ELF section has a virtual
address of zero and the custom Linux relocation code avoids relocating
specific symbols. It makes the code simple and easily adaptable with or
without SMP support.

This design is incompatible with PIE. While creating a PIE binary, the
compiler tries to make everything relative. The compiler will attempt to
generate instructions with the distance between zero and any 64-bit
virtual address. It will fail as the relocation range cannot fit within
the possible instructions accessing a segment register.

This patch solves this problem by removing the zero mapping. The .percpu
symbols are now close to the base of the kernel and the compiler
generates appropriate relocations. To accommodate this change, the GS base
is adapted to be the difference between zero and the .percpu section
address. These changes are done only when PIE is enabled. The original
implementation is kept as-is by default.

The assembly and PER_CPU macros are changed to use relative references
when PIE is enabled.

The KALLSYMS_ABSOLUTE_PERCPU configuration is disabled with PIE given
percpu symbols are not absolute in this case.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.
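
To make the addressing change concrete, a sketch of the two forms
PER_CPU_VAR() selects between. this_cpu_off stands in for any per-cpu
symbol, and both helpers assume a kernel context where that symbol and
the GS base exist:

static inline unsigned long percpu_load_absolute(void)
{
	unsigned long v;

	/* Default build: absolute address fixed up at link time. */
	asm("movq %%gs:this_cpu_off, %0" : "=r" (v));
	return v;
}

static inline unsigned long percpu_load_rip_relative(void)
{
	unsigned long v;

	/* PIE build: RIP-relative reference; the adjusted GS base
	 * absorbs the distance to the .percpu section. */
	asm("movq %%gs:this_cpu_off(%%rip), %0" : "=r" (v));
	return v;
}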

Signed-off-by: Thomas Garnier 
---
 arch/x86/entry/calling.h |  2 +-
 arch/x86/entry/entry_64.S|  4 ++--
 arch/x86/include/asm/percpu.h| 25 +++--
 arch/x86/include/asm/processor.h |  4 +++-
 arch/x86/kernel/head_64.S|  4 
 arch/x86/kernel/setup_percpu.c   |  5 -
 arch/x86/kernel/vmlinux.lds.S| 13 +++--
 arch/x86/lib/cmpxchg16b_emu.S|  8 
 arch/x86/xen/xen-asm.S   | 12 ++--
 init/Kconfig |  2 +-
 10 files changed, 55 insertions(+), 24 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index efb0d1b1f15f..d5a6d3a0c24b 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -218,7 +218,7 @@ For 32-bit we have the following conventions - kernel is 
built with
 .endm
 
 #define THIS_CPU_user_pcid_flush_mask   \
-   PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
+   PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)
 
 .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 16a93eb4c11f..fc15fe058d3c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -298,7 +298,7 @@ ENTRY(__switch_to_asm)
 
 #ifdef CONFIG_STACKPROTECTOR
movqTASK_stack_canary(%rsi), %rbx
-   movq%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+   movq%rbx, PER_CPU_VAR(irq_stack_union + stack_canary_offset)
 #endif
 
 #ifdef CONFIG_RETPOLINE
@@ -841,7 +841,7 @@ apicinterrupt IRQ_WORK_VECTOR   
irq_work_interrupt  smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw + (TSS_ist + ((x) - 1) * 8))
 
 /**
  * idtentry - Generate an IDT entry stub
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 1a19d11cfbbd..608c15751f29 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -5,9 +5,11 @@
 #ifdef CONFIG_X86_64
 #define __percpu_seg   gs
 #define __percpu_mov_opmovq
+#define __percpu_rel   (%rip)
 #else
 #define __percpu_seg   fs
 #define __percpu_mov_opmovl
+#define __percpu_rel
 #endif
 
 #ifdef __ASSEMBLY__
@@ -28,10 +30,14 @@
 #define PER_CPU(var, reg)  \
__percpu_mov_op %__percpu_seg:this_cpu_off, reg;\
lea var(reg), reg
-#define PER_CPU_VAR(var)   %__percpu_seg:var
+/* Compatible with Position Independent Code */
+#define PER_CPU_VAR(var)   %__percpu_seg:(var)##__percpu_rel
+/* Rare absolute reference */
+#define PER_CPU_VAR_ABS(var)   %__percpu_seg:var
 #else /* ! SMP */
 #define PER_CPU(var, reg)  __percpu_mov_op $var, reg
-#define PER_CPU_VAR(var)   var
+#define PER_CPU_VAR(var)   (var)##__percpu_rel
+#define PER_CPU_VAR_ABS(var)   var
 #endif /* SMP */
 
 #ifdef CONFIG_X86_64_SMP
@@ -209,27 +215,34 @@ do {  
\
pfo_ret__;  \
 })
 
+/* Position Independent code uses relative addresses only */
+#ifdef CONFIG_X86_PIE
+#define __percpu_stable_arg __percpu_arg(a1)
+#else
+#define __percpu_stable_arg __percpu_arg(P1)
+#endif
+
 #define percpu_stable_op(op, var)  \
 ({ \
t

[PATCH v6 10/27] x86/power/64: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
---
 arch/x86/power/hibernate_asm_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S
index 3008baa2fa95..9ed980efef72 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,7 @@
 #include 
 
 ENTRY(swsusp_arch_suspend)
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -115,7 +115,7 @@ ENTRY(restore_registers)
movq%rax, %cr4;  # turn PGE back on
 
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqpt_regs_sp(%rax), %rsp
movqpt_regs_bp(%rax), %rbp
movqpt_regs_si(%rax), %rsi
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 09/27] x86/boot/64: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Early at boot, the kernel is mapped at a temporary address while preparing
the page table. To know the changes needed for the page table with KASLR,
the boot code calculates the difference between the expected address of the
kernel and the one chosen by KASLR. It does not work with PIE because all
symbol references in the code are relative. Instead of getting the future
relocated virtual address, you would get the current temporary mapping.
Instructions were changed to have absolute 64-bit references.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/head_64.S | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d1dbe8e4eb82..b9b6c6aa0313 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -90,8 +90,10 @@ startup_64:
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(early_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
jmp 1f
+
 ENTRY(secondary_startup_64)
UNWIND_HINT_EMPTY
/*
@@ -120,7 +122,8 @@ ENTRY(secondary_startup_64)
popq%rsi
 
/* Form the CR3 value being sure to include the CR3 modifier */
-   addq$(init_top_pgt - __START_KERNEL_map), %rax
+   movabs  $(init_top_pgt - __START_KERNEL_map), %rcx
+   addq%rcx, %rax
 1:
 
/* Enable PAE mode, PGE and LA57 */
@@ -138,7 +141,7 @@ ENTRY(secondary_startup_64)
movq%rax, %cr3
 
/* Ensure I am executing from virtual addresses */
-   movq$1f, %rax
+   movabs  $1f, %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
 1:
@@ -235,11 +238,12 @@ ENTRY(secondary_startup_64)
 *  REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 *  address given in m16:64.
 */
-   pushq   $.Lafter_lret   # put return address on stack for unwinder
+   movabs  $.Lafter_lret, %rax
+   pushq   %rax# put return address on stack for unwinder
xorl%ebp, %ebp  # clear frame pointer
-   movqinitial_code(%rip), %rax
+   leaqinitial_code(%rip), %rax
pushq   $__KERNEL_CS# set correct cs
-   pushq   %rax# target address in negative space
+   pushq   (%rax)  # target address in negative space
lretq
 .Lafter_lret:
 END(secondary_startup_64)
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 01/27] x86/crypto: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/crypto/aegis128-aesni-asm.S |  6 +-
 arch/x86/crypto/aegis128l-aesni-asm.S|  8 +-
 arch/x86/crypto/aegis256-aesni-asm.S |  6 +-
 arch/x86/crypto/aes-x86_64-asm_64.S  | 45 +
 arch/x86/crypto/aesni-intel_asm.S|  8 +-
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 42 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 44 -
 arch/x86/crypto/camellia-x86_64-asm_64.S |  8 +-
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S| 50 +-
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S| 44 +
 arch/x86/crypto/des3_ede-asm_64.S| 96 +---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +-
 arch/x86/crypto/glue_helper-asm-avx.S|  4 +-
 arch/x86/crypto/glue_helper-asm-avx2.S   |  6 +-
 arch/x86/crypto/morus1280-avx2-asm.S |  4 +-
 arch/x86/crypto/morus1280-sse2-asm.S |  8 +-
 arch/x86/crypto/morus640-sse2-asm.S  |  6 +-
 arch/x86/crypto/sha256-avx2-asm.S| 23 +++--
 18 files changed, 236 insertions(+), 176 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-asm.S 
b/arch/x86/crypto/aegis128-aesni-asm.S
index 5f7e43d4f64a..a48f790954f8 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -203,8 +203,8 @@ ENTRY(crypto_aegis128_aesni_init)
movdqa KEY, STATE4
 
/* load the constants: */
-   movdqa .Laegis128_const_0, STATE2
-   movdqa .Laegis128_const_1, STATE1
+   movdqa .Laegis128_const_0(%rip), STATE2
+   movdqa .Laegis128_const_1(%rip), STATE1
pxor STATE2, STATE3
pxor STATE1, STATE4
 
@@ -684,7 +684,7 @@ ENTRY(crypto_aegis128_aesni_dec_tail)
punpcklbw T0, T0
punpcklbw T0, T0
punpcklbw T0, T0
-   movdqa .Laegis128_counter, T1
+   movdqa .Laegis128_counter(%rip), T1
pcmpgtb T1, T0
pand T0, MSG
 
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S 
b/arch/x86/crypto/aegis128l-aesni-asm.S
index 491dd61c845c..a097b8956af8 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -331,8 +331,8 @@ ENTRY(crypto_aegis128l_aesni_init)
pxor MSG0, STATE4
 
/* load the constants: */
-   movdqa .Laegis128l_const_0, STATE2
-   movdqa .Laegis128l_const_1, STATE1
+   movdqa .Laegis128l_const_0(%rip), STATE2
+   movdqa .Laegis128l_const_1(%rip), STATE1
movdqa STATE1, STATE3
pxor STATE2, STATE5
pxor STATE1, STATE6
@@ -765,8 +765,8 @@ ENTRY(crypto_aegis128l_aesni_dec_tail)
punpcklbw T0, T0
punpcklbw T0, T0
movdqa T0, T1
-   movdqa .Laegis128l_counter0, T2
-   movdqa .Laegis128l_counter1, T3
+   movdqa .Laegis128l_counter0(%rip), T2
+   movdqa .Laegis128l_counter1(%rip), T3
pcmpgtb T2, T0
pcmpgtb T3, T1
pand T0, MSG0
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S 
b/arch/x86/crypto/aegis256-aesni-asm.S
index 8870c7c5d9a4..195648b93fd1 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -273,8 +273,8 @@ ENTRY(crypto_aegis256_aesni_init)
movdqa T3, STATE1
 
/* load the constants: */
-   movdqa .Laegis256_const_0, STATE3
-   movdqa .Laegis256_const_1, STATE2
+   movdqa .Laegis256_const_0(%rip), STATE3
+   movdqa .Laegis256_const_1(%rip), STATE2
pxor STATE3, STATE4
pxor STATE2, STATE5
 
@@ -647,7 +647,7 @@ ENTRY(crypto_aegis256_aesni_dec_tail)
punpcklbw T0, T0
punpcklbw T0, T0
punpcklbw T0, T0
-   movdqa .Laegis256_counter, T1
+   movdqa .Laegis256_counter(%rip), T1
pcmpgtb T1, T0
pand T0, MSG
 
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S 
b/arch/x86/crypto/aes-x86_64-asm_64.S
index 8739cf7795de..42eaacb589b3 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -48,8 +48,12 @@
 #define R10%r10
 #define R11%r11
 
+/* Hold global for PIE support */
+#define RBASE  %r12
+
 #define prologue(FUNC,KEY,B128,B192,r1,r2,r5,r6,r7,r8,r9,r10,r11) \
ENTRY(FUNC);\
+   pushq   RBASE;  \
movqr1,r2;  \
leaqKEY+48(r8),r9;  \
movqr10,r11;\
@@ -74,54 +78,63 @@
movlr6 ## E,4(r9);  \
movlr7 ## E,8(r9);  \
movlr8 ## E,12(r9); \
+   popqRBASE;  \
ret;\
ENDPROC(FUNC);
 
+#define round_mov(tab_off, reg_i, reg_o) \
+   leaqtab_off(%rip), RBASE; \
+   movl(RBASE,reg_i,4), reg_o;
+
+#define

[PATCH v6 04/27] x86: relocate_kernel - Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only absolute references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/relocate_kernel_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S 
b/arch/x86/kernel/relocate_kernel_64.S
index 11eda21eb697..3320368b6ec9 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -208,7 +208,7 @@ identity_mapped:
movq%rax, %cr3
lea PAGE_SIZE(%r8), %rsp
callswap_pages
-   movq$virtual_mapped, %rax
+   movabsq $virtual_mapped, %rax
pushq   %rax
ret
 
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 07/27] x86/CPU: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible. Use the new _ASM_MOVABS macro instead of
the 'mov $symbol, %dst' construct.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/processor.h | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 33051436c864..ce9851bf6778 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -723,11 +723,13 @@ static inline void sync_core(void)
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
-   "pushq $1f\n\t"
+   "movabsq $1f, %q0\n\t"
+   "pushq %q0\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
-   : "=" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+   : "=" (tmp), ASM_CALL_CONSTRAINT
+   : : "cc", "memory");
 #endif
 }
 
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 03/27] x86: Add macro to get symbol address for PIE support

2019-01-31 Thread Thomas Garnier
Add a new _ASM_MOVABS macro to fetch a symbol address. It will be used
to replace "_ASM_MOV $, %dst" code construct that are not compatible
with PIE.
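
A hedged usage sketch: example_sym is an invented symbol, and asm/asm.h
is assumed to provide _ASM_MOVABS once this patch is applied:

#include <asm/asm.h>

extern unsigned long example_sym;

static inline unsigned long example_sym_address(void)
{
	unsigned long addr;

	/* Expands to "movl $example_sym, %0" on 32-bit and
	 * "movabsq $example_sym, %0" on 64-bit. */
	asm(_ASM_MOVABS " $example_sym, %0" : "=r" (addr));
	return addr;
}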

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/asm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 6467757bb39f..5ff63ca80bca 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -30,6 +30,7 @@
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
 
 #define _ASM_MOV   __ASM_SIZE(mov)
+#define _ASM_MOVABS__ASM_SEL(movl, movabsq)
 #define _ASM_INC   __ASM_SIZE(inc)
 #define _ASM_DEC   __ASM_SIZE(dec)
 #define _ASM_ADD   __ASM_SIZE(add)
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 06/27] x86: pm-trace - Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change assembly to use the new _ASM_MOVABS macro instead of _ASM_MOV for
the assembly to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/pm-trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pm-trace.h b/arch/x86/include/asm/pm-trace.h
index bfa32aa428e5..972070806ce9 100644
--- a/arch/x86/include/asm/pm-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -8,7 +8,7 @@
 do {   \
if (pm_trace_enabled) { \
const void *tracedata;  \
-   asm volatile(_ASM_MOV " $1f,%0\n"   \
+   asm volatile(_ASM_MOVABS " $1f,%0\n"\
 ".section .tracedata,\"a\"\n"  \
 "1:\t.word %c1\n\t"\
 _ASM_PTR " %c2\n"  \
-- 
2.20.1.495.gaa96b0ce6b-goog



[PATCH v6 08/27] x86/acpi: Adapt assembly for PIE support

2019-01-31 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range below 0x8000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ---
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 50b8ed0317a3..472659c0f811 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -14,7 +14,7 @@
 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 */
 ENTRY(wakeup_long64)
-   movqsaved_magic, %rax
+   movqsaved_magic(%rip), %rax
movq$0x123456789abcdef0, %rdx
cmpq%rdx, %rax
jne bogus_64_magic
@@ -25,14 +25,14 @@ ENTRY(wakeup_long64)
movw%ax, %es
movw%ax, %fs
movw%ax, %gs
-   movqsaved_rsp, %rsp
+   movqsaved_rsp(%rip), %rsp
 
-   movqsaved_rbx, %rbx
-   movqsaved_rdi, %rdi
-   movqsaved_rsi, %rsi
-   movqsaved_rbp, %rbp
+   movqsaved_rbx(%rip), %rbx
+   movqsaved_rdi(%rip), %rdi
+   movqsaved_rsi(%rip), %rsi
+   movqsaved_rbp(%rip), %rbp
 
-   movqsaved_rip, %rax
+   movqsaved_rip(%rip), %rax
jmp *%rax
 ENDPROC(wakeup_long64)
 
@@ -45,7 +45,7 @@ ENTRY(do_suspend_lowlevel)
xorl%eax, %eax
callsave_processor_state
 
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movq%rsp, pt_regs_sp(%rax)
movq%rbp, pt_regs_bp(%rax)
movq%rsi, pt_regs_si(%rax)
@@ -64,13 +64,14 @@ ENTRY(do_suspend_lowlevel)
pushfq
popqpt_regs_flags(%rax)
 
-   movq$.Lresume_point, saved_rip(%rip)
+   leaq.Lresume_point(%rip), %rax
+   movq%rax, saved_rip(%rip)
 
-   movq%rsp, saved_rsp
-   movq%rbp, saved_rbp
-   movq%rbx, saved_rbx
-   movq%rdi, saved_rdi
-   movq%rsi, saved_rsi
+   movq%rsp, saved_rsp(%rip)
+   movq%rbp, saved_rbp(%rip)
+   movq%rbx, saved_rbx(%rip)
+   movq%rdi, saved_rdi(%rip)
+   movq%rsi, saved_rsi(%rip)
 
addq$8, %rsp
movl$3, %edi
@@ -82,7 +83,7 @@ ENTRY(do_suspend_lowlevel)
.align 4
 .Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
-   movq$saved_context, %rax
+   leaqsaved_context(%rip), %rax
movqsaved_context_cr4(%rax), %rbx
movq%rbx, %cr4
movqsaved_context_cr3(%rax), %rbx
-- 
2.20.1.495.gaa96b0ce6b-goog



Re: [PATCH 1/2] x86/mm/KASLR: Fix the wrong calculation of kalsr region initial size

2018-09-04 Thread Thomas Garnier
Thanks Baoquan!

Reviewed-by: Thomas Garnier 

On Wed, Aug 29, 2018 at 4:49 AM Kirill A. Shutemov  wrote:
>
> On Wed, Aug 29, 2018 at 10:17:53AM +0800, Baoquan He wrote:
> > In memory KASLR, __PHYSICAL_MASK_SHIFT is taken to calculate the
> > initial size of the direct mapping region. This is right in the
> > old code where __PHYSICAL_MASK_SHIFT was equal to MAX_PHYSMEM_BITS,
> > 46bit, and only 4-level mode was supported.
> >
> > Later, in commit:
> > b83ce5ee91471d ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52"),
> > __PHYSICAL_MASK_SHIFT was changed to be 52 always, no matter it's
> > 5-level or 4-level. This is wrong for 4-level paging. Then when
> > adapt phyiscal memory region size based on available memory, it
> > will overflow if the amount of system RAM and the padding is bigger
> > than 64TB.
> >
> > In fact, here MAX_PHYSMEM_BITS should be used instead. Fix it by
> > by replacing __PHYSICAL_MASK_SHIFT with MAX_PHYSMEM_BITS.
> >
> > Signed-off-by: Baoquan He 
>
> Acked-by: Kirill A. Shutemov 
>
> --
>  Kirill A. Shutemov



-- 
Thomas


Re: [PATCH v5 23/27] x86/modules: Adapt module loading for PIE support

2018-06-25 Thread Thomas Garnier
On Mon, Jun 25, 2018 at 4:51 PM Randy Dunlap  wrote:
>
> On 06/25/18 15:39, Thomas Garnier wrote:
> > Adapt module loading to support PIE relocations. Generate dynamic GOT if
> > a symbol requires it but no entry exists in the kernel GOT.
>
> Hi,
>
> This patch description (and a few others with the same sentence) is a little
> confusing to me:
>
> > Position Independent Executable (PIE) support will allow to extend the
> > KASLR randomization range 0x8000.
>
> Is there a word missing after "range"?  May "below"?
> As is, it seems to imply "range by 0x8000."

Yes, below. I will fix this on all patch descriptions on next iteration. Thanks!

>
>
> thanks.
>
> >
> > Signed-off-by: Thomas Garnier 
> > ---
> >  arch/x86/Makefile   |   4 +
> >  arch/x86/include/asm/module.h   |  11 ++
> >  arch/x86/include/asm/sections.h |   4 +
> >  arch/x86/kernel/module.c| 181 +++-
> >  arch/x86/kernel/module.lds  |   3 +
> >  5 files changed, 198 insertions(+), 5 deletions(-)
> >  create mode 100644 arch/x86/kernel/module.lds
>
>
> --
> ~Randy



-- 
Thomas



[PATCH v5 02/27] x86: Use symbol name on bug table for PIE support

2018-06-25 Thread Thomas Garnier
Replace the %c constraint with %P. The %c is incompatible with PIE
because it implies an immediate value whereas %P references a symbol.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/bug.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 6804d6642767..3d690a4abf50 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -35,7 +35,7 @@ do {                                                          \
asm volatile("1:\t" ins "\n"\
 ".pushsection __bug_table,\"aw\"\n"\
 "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
-"\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"   \
+"\t"  __BUG_REL(%P0) "\t# bug_entry::file\n"   \
 "\t.word %c1""\t# bug_entry::line\n"   \
 "\t.word %c2""\t# bug_entry::flags\n"  \
 "\t.org 2b+%c3\n"  \
-- 
2.18.0.rc2.346.g013aa6912e-goog
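
A hedged, self-contained sketch of the pattern the bug table relies on, with
made-up section and macro names (not the kernel's __BUG_REL): store a
PC-relative 32-bit offset to the file-name string in a side section, with %P
emitting the operand as a bare symbol name that the assembler can subtract
the current location from.

/* Illustration only; .example_table and RECORD_FILE are made-up names.
 * Compiles with gcc -c -fno-pie, the way non-PIE kernel builds do today. */
#define RECORD_FILE()						\
	asm volatile(".pushsection .example_table, \"a\"\n"	\
		     "1:\t.long %P0 - 1b\t# relative file ref\n"\
		     ".popsection\n"				\
		     : : "i" (__FILE__))

void example_use(void)
{
	RECORD_FILE();
}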



[PATCH v5 03/27] x86: Use symbol name in jump table for PIE support

2018-06-25 Thread Thomas Garnier
Replace the %c constraint with %P. The %c is incompatible with PIE
because it implies an immediate value whereas %P references a symbol.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/jump_label.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..dfdcdc39604a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -37,9 +37,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
-   _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+   _ASM_PTR "1b, %l[l_yes], %P0 \n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : :  "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
@@ -53,9 +53,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
"2:\n\t"
".pushsection __jump_table,  \"aw\" \n\t"
_ASM_ALIGN "\n\t"
-   _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+   _ASM_PTR "1b, %l[l_yes], %P0 \n\t"
".popsection \n\t"
-   : :  "i" (key), "i" (branch) : : l_yes);
+   : :  "X" (&((char *)key)[branch]) : : l_yes);
 
return false;
 l_yes:
-- 
2.18.0.rc2.346.g013aa6912e-goog
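
A toy user-space illustration (assumed names, not the kernel's jump label
code) of what the new single operand &((char *)key)[branch] encodes: since
struct static_key is at least word-aligned, bit 0 of the recorded address is
free to carry the branch flag, which is why the two "i" operands can collapse
into one symbol reference.

#include <stdio.h>

struct static_key { int enabled; };	/* stand-in for the kernel type */

int main(void)
{
	static struct static_key key;
	int branch = 1;				/* hypothetical branch flag */
	void *packed = &((char *)&key)[branch];	/* key address, low bit = branch */

	printf("key=%p packed=%p branch bit=%lu\n",
	       (void *)&key, packed, (unsigned long)packed & 1UL);
	return 0;
}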



[PATCH v5 05/27] x86: relocate_kernel - Adapt assembly for PIE support

2018-06-25 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/kernel/relocate_kernel_64.S | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 11eda21eb697..a7227dfe1a2b 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -208,9 +208,11 @@ identity_mapped:
movq    %rax, %cr3
lea     PAGE_SIZE(%r8), %rsp
call    swap_pages
-   movq    $virtual_mapped, %rax
-   pushq   %rax
-   ret
+   jmp *virtual_mapped_addr(%rip)
+
+   /* Absolute value for PIE support */
+virtual_mapped_addr:
+   .quad virtual_mapped
 
 virtual_mapped:
movq    RSP(%r8), %rsp
-- 
2.18.0.rc2.346.g013aa6912e-goog



[PATCH v5 07/27] x86: pm-trace - Adapt assembly for PIE support

2018-06-25 Thread Thomas Garnier
Change assembly to use the new _ASM_MOVABS macro instead of _ASM_MOV for
the assembly to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/pm-trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/pm-trace.h b/arch/x86/include/asm/pm-trace.h
index bfa32aa428e5..972070806ce9 100644
--- a/arch/x86/include/asm/pm-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -8,7 +8,7 @@
 do {   \
if (pm_trace_enabled) { \
const void *tracedata;  \
-   asm volatile(_ASM_MOV " $1f,%0\n"   \
+   asm volatile(_ASM_MOVABS " $1f,%0\n"\
 ".section .tracedata,\"a\"\n"  \
 "1:\t.word %c1\n\t"\
 _ASM_PTR " %c2\n"  \
-- 
2.18.0.rc2.346.g013aa6912e-goog
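
For context, a hedged sketch (a hypothetical helper, not kernel code) of the
three ways to take a symbol's address on x86-64; the plain mov-immediate form
is the one that only works while the kernel is linked in the fixed upper 2GB
(mcmodel=kernel), which is why these patches switch to movabs or RIP-relative
forms.

/* Assembles with gcc -c; the local label '1' stands in for a real symbol.
 * Only the linker would reject the first form for a PIE or out-of-range link. */
unsigned long example_symbol_addresses(void)
{
	unsigned long imm32, imm64, rel;

	asm volatile("movq    $1f, %0\n\t"	 /* 32-bit sign-extended immediate */
		     "movabsq $1f, %1\n\t"	 /* 64-bit absolute immediate     */
		     "leaq    1f(%%rip), %2\n\t" /* PC-relative, PIE friendly     */
		     "1:"
		     : "=r" (imm32), "=r" (imm64), "=r" (rel));
	return imm32 + imm64 + rel;
}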



[PATCH v5 08/27] x86/CPU: Adapt assembly for PIE support

2018-06-25 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible. Use the new _ASM_MOVABS macro instead of
the 'mov $symbol, %dst' construct.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/include/asm/processor.h | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cfd29ee8c3da..25b5842a4646 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -50,7 +50,7 @@ static inline void *current_text_addr(void)
 {
void *pc;
 
-   asm volatile("mov $1f, %0; 1:":"=r" (pc));
+   asm volatile(_ASM_MOVABS " $1f, %0; 1:":"=r" (pc));
 
return pc;
 }
@@ -711,6 +711,7 @@ static inline void sync_core(void)
: ASM_CALL_CONSTRAINT : : "memory");
 #else
unsigned int tmp;
+   unsigned long tmp2;
 
asm volatile (
UNWIND_HINT_SAVE
@@ -721,11 +722,13 @@ static inline void sync_core(void)
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
-   "pushq $1f\n\t"
+   "leaq 1f(%%rip), %1\n\t"
+   "pushq %1\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
-   : "=" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+   : "=" (tmp), "=" (tmp2), ASM_CALL_CONSTRAINT
+   : : "cc", "memory");
 #endif
 }
 
-- 
2.18.0.rc2.346.g013aa6912e-goog
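
A related hedged sketch (assumed function name, not part of the patch): the
same "address of the next instruction" idiom written with a RIP-relative lea,
which avoids the absolute relocation altogether instead of widening it with
movabs. Whether a given site can use this form or needs _ASM_MOVABS depends on
how the value is consumed, so this is only an alternative, not what the patch
does.

static inline void *current_text_addr_rip(void)
{
	void *pc;

	/* leaq computes the label address PC-relatively, so no relocation
	 * against an absolute symbol address is emitted. */
	asm volatile("leaq 1f(%%rip), %0; 1:" : "=r" (pc));
	return pc;
}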



[PATCH v5 11/27] x86/power/64: Adapt assembly for PIE support

2018-06-25 Thread Thomas Garnier
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
Acked-by: Pavel Machek 
Acked-by: Rafael J. Wysocki 
---
 arch/x86/power/hibernate_asm_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..6fdd7bbc3c33 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,7 @@
 #include 
 
 ENTRY(swsusp_arch_suspend)
-   movq    $saved_context, %rax
+   leaq    saved_context(%rip), %rax
movq    %rsp, pt_regs_sp(%rax)
movq    %rbp, pt_regs_bp(%rax)
movq    %rsi, pt_regs_si(%rax)
@@ -115,7 +115,7 @@ ENTRY(restore_registers)
movq    %rax, %cr4;  # turn PGE back on

/* We don't restore %rax, it must be 0 anyway */
-   movq    $saved_context, %rax
+   leaq    saved_context(%rip), %rax
movq    pt_regs_sp(%rax), %rsp
movq    pt_regs_bp(%rax), %rbp
movq    pt_regs_si(%rax), %rsi
-- 
2.18.0.rc2.346.g013aa6912e-goog



[PATCH v5 17/27] x86/relocs: Handle PIE relocations

2018-06-25 Thread Thomas Garnier
Change the relocation tool to correctly handle relocations generated by
-fPIE option:

 - Add relocation for each entry of the .got section given the linker does not
   generate R_X86_64_GLOB_DAT on a simple link.
 - Ignore R_X86_64_GOTPCREL.

Signed-off-by: Thomas Garnier 
---
 arch/x86/tools/relocs.c | 93 -
 1 file changed, 92 insertions(+), 1 deletion(-)

diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 220e97841e49..a35cc337f883 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -32,6 +32,7 @@ struct section {
Elf_Sym*symtab;
Elf_Rel*reltab;
char   *strtab;
+   Elf_Addr   *got;
 };
 static struct section *secs;
 
@@ -293,6 +294,35 @@ static Elf_Sym *sym_lookup(const char *symname)
return 0;
 }
 
+static Elf_Sym *sym_lookup_addr(Elf_Addr addr, const char **name)
+{
+   int i;
+   for (i = 0; i < ehdr.e_shnum; i++) {
+   struct section *sec = &secs[i];
+   long nsyms;
+   Elf_Sym *symtab;
+   Elf_Sym *sym;
+
+   if (sec->shdr.sh_type != SHT_SYMTAB)
+   continue;
+
+   nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
+   symtab = sec->symtab;
+
+   for (sym = symtab; --nsyms >= 0; sym++) {
+   if (sym->st_value == addr) {
+   if (name) {
+   *name = sym_name(sec->link->strtab,
+sym);
+   }
+   return sym;
+   }
+   }
+   }
+   return 0;
+}
+
+
 #if BYTE_ORDER == LITTLE_ENDIAN
 #define le16_to_cpu(val) (val)
 #define le32_to_cpu(val) (val)
@@ -513,6 +543,33 @@ static void read_relocs(FILE *fp)
}
 }
 
+static void read_got(FILE *fp)
+{
+   int i;
+   for (i = 0; i < ehdr.e_shnum; i++) {
+   struct section *sec = &secs[i];
+   sec->got = NULL;
+   if (sec->shdr.sh_type != SHT_PROGBITS ||
+   strcmp(sec_name(i), ".got")) {
+   continue;
+   }
+   sec->got = malloc(sec->shdr.sh_size);
+   if (!sec->got) {
+   die("malloc of %d bytes for got failed\n",
+   sec->shdr.sh_size);
+   }
+   if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
+   die("Seek to %d failed: %s\n",
+   sec->shdr.sh_offset, strerror(errno));
+   }
+   if (fread(sec->got, 1, sec->shdr.sh_size, fp)
+   != sec->shdr.sh_size) {
+   die("Cannot read got: %s\n",
+   strerror(errno));
+   }
+   }
+}
+
 
 static void print_absolute_symbols(void)
 {
@@ -643,6 +700,32 @@ static void add_reloc(struct relocs *r, uint32_t offset)
r->offset[r->count++] = offset;
 }
 
+/*
+ * The linker does not generate relocations for the GOT for the kernel.
+ * If a GOT is found, simulate the relocations that should have been included.
+ */
+static void walk_got_table(int (*process)(struct section *sec, Elf_Rel *rel,
+ Elf_Sym *sym, const char *symname),
+  struct section *sec)
+{
+   int i;
+   Elf_Addr entry;
+   Elf_Sym *sym;
+   const char *symname;
+   Elf_Rel rel;
+
+   for (i = 0; i < sec->shdr.sh_size/sizeof(Elf_Addr); i++) {
+   entry = sec->got[i];
+   sym = sym_lookup_addr(entry, &symname);
+   if (!sym)
+   die("Could not found got symbol for entry %d\n", i);
+   rel.r_offset = sec->shdr.sh_addr + i * sizeof(Elf_Addr);
+   rel.r_info = ELF_BITS == 64 ? R_X86_64_GLOB_DAT
+: R_386_GLOB_DAT;
+   process(sec, &rel, sym, symname);
+   }
+}
+
 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
 {
@@ -656,6 +739,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
struct section *sec = &secs[i];
 
if (sec->shdr.sh_type != SHT_REL_TYPE) {
+   if (sec->got)
+   walk_got_table(process, sec);
continue;
}
sec_symtab  = sec->link;
@@ -765,6 +850,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
offset += per_cpu_load_addr;
 
switch (r_type) {
+   case R_X86_64_GOTPCREL:
case R_X86_64_NONE:
/*
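
To make the purpose of the synthesized GLOB_DAT entries concrete, a toy
user-space sketch (hypothetical addresses and offset, not the boot code):
each .got slot holds an absolute symbol address, so it has to appear in the
relocation list the KASLR boot code walks when it adds the randomization
offset to 64-bit locations.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* GOT slots as linked (hypothetical addresses) */
	uint64_t got[3] = { 0xffffffff81000000ull,
			    0xffffffff81002000ull,
			    0xffffffff81004000ull };
	uint64_t kaslr_offset = 0x2000000;	/* hypothetical slide */

	for (int i = 0; i < 3; i++)
		got[i] += kaslr_offset;		/* what the listed relocations allow */

	printf("first slot now %#llx\n", (unsigned long long)got[0]);
	return 0;
}
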

[PATCH v5 23/27] x86/modules: Adapt module loading for PIE support

2018-06-25 Thread Thomas Garnier
Adapt module loading to support PIE relocations. Generate dynamic GOT if
a symbol requires it but no entry exists in the kernel GOT.

Position Independent Executable (PIE) support will allow to extend the
KASLR randomization range 0x8000.

Signed-off-by: Thomas Garnier 
---
 arch/x86/Makefile   |   4 +
 arch/x86/include/asm/module.h   |  11 ++
 arch/x86/include/asm/sections.h |   4 +
 arch/x86/kernel/module.c| 181 +++-
 arch/x86/kernel/module.lds  |   3 +
 5 files changed, 198 insertions(+), 5 deletions(-)
 create mode 100644 arch/x86/kernel/module.lds

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index c2c221fd20d7..112ccdc99566 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -134,7 +134,11 @@ else
 KBUILD_CFLAGS += $(cflags-y)
 
 KBUILD_CFLAGS += -mno-red-zone
+ifdef CONFIG_X86_PIE
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/x86/kernel/module.lds
+else
 KBUILD_CFLAGS += -mcmodel=kernel
+endif
 
 # -funit-at-a-time shrinks the kernel .text considerably
 # unfortunately it makes reading oopses harder.
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..68ff05e14288 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,12 +5,23 @@
 #include 
 #include 
 
+#ifdef CONFIG_X86_PIE
+struct mod_got_sec {
+   struct elf64_shdr   *got;
+   int got_num_entries;
+   int got_max_entries;
+};
+#endif
+
 struct mod_arch_specific {
 #ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
 #endif
+#ifdef CONFIG_X86_PIE
+   struct mod_got_sec  core;
+#endif
 };
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 5c019d23d06b..da3d98bb2bcb 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -13,4 +13,8 @@ extern char __end_rodata_hpage_align[];
 extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
+#if defined(CONFIG_X86_PIE)
+extern char __start_got[], __end_got[];
+#endif
+
 #endif /* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f58336af095c..88895f3d474b 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -77,6 +78,173 @@ static unsigned long int get_module_load_offset(void)
 }
 #endif
 
+#ifdef CONFIG_X86_PIE
+static u64 find_got_kernel_entry(Elf64_Sym *sym, const Elf64_Rela *rela)
+{
+   u64 *pos;
+
+   for (pos = (u64*)__start_got; pos < (u64*)__end_got; pos++) {
+   if (*pos == sym->st_value)
+   return (u64)pos + rela->r_addend;
+   }
+
+   return 0;
+}
+
+static u64 module_emit_got_entry(struct module *mod, void *loc,
+const Elf64_Rela *rela, Elf64_Sym *sym)
+{
+   struct mod_got_sec *gotsec = &mod->arch.core;
+   u64 *got = (u64*)gotsec->got->sh_addr;
+   int i = gotsec->got_num_entries;
+   u64 ret;
+
+   /* Check if we can use the kernel GOT */
+   ret = find_got_kernel_entry(sym, rela);
+   if (ret)
+   return ret;
+
+   got[i] = sym->st_value;
+
+   /*
+* Check if the entry we just created is a duplicate. Given that the
+* relocations are sorted, this will be the last entry we allocated.
+* (if one exists).
+*/
+   if (i > 0 && got[i] == got[i - 2]) {
+   ret = (u64)&got[i - 1];
+   } else {
+   gotsec->got_num_entries++;
+   BUG_ON(gotsec->got_num_entries > gotsec->got_max_entries);
+   ret = (u64)&got[i];
+   }
+
+   return ret + rela->r_addend;
+}
+
+#define cmp_3way(a,b)  ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+   const Elf64_Rela *x = a, *y = b;
+   int i;
+
+   /* sort by type, symbol index and addend */
+   i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+   if (i == 0)
+   i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+   if (i == 0)
+   i = cmp_3way(x->r_addend, y->r_addend);
+   return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+   /*
+* Entries are sorted by type, symbol index and addend. That means
+* that, if a duplicate entry exists, it must be in the preceding
+* slot.
+*/
+   return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_gots(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+{
+   unsigned int ret = 0;
+   Elf64_Sym *s;
+   int i;
+
+   for (i = 0; i < 
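
A standalone toy (not the kernel code) of the duplicate-elimination idea
behind cmp_rela()/duplicate_rel() above: sort the relocations, then any
duplicate (same type, symbol and addend) sits in the preceding slot, so
counting the GOT entries to reserve is a single pass. struct toy_rela and the
sample values are made up.

#include <stdio.h>
#include <stdlib.h>

struct toy_rela { unsigned long r_info; long r_addend; };	/* simplified */

static int cmp_toy(const void *a, const void *b)
{
	const struct toy_rela *x = a, *y = b;

	if (x->r_info != y->r_info)
		return x->r_info < y->r_info ? -1 : 1;
	return (x->r_addend > y->r_addend) - (x->r_addend < y->r_addend);
}

int main(void)
{
	struct toy_rela r[] = { { 2, 0 }, { 1, 8 }, { 2, 0 }, { 1, 0 } };
	int n = sizeof(r) / sizeof(r[0]), unique = 0;

	qsort(r, n, sizeof(r[0]), cmp_toy);
	for (int i = 0; i < n; i++)
		if (i == 0 || cmp_toy(&r[i], &r[i - 1]) != 0)
			unique++;	/* one GOT slot per unique relocation */

	printf("GOT slots to reserve: %d\n", unique);	/* prints 3 */
	return 0;
}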
