Re: [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-26 Thread Thomas Garnier via Virtualization
On Fri, Oct 20, 2017 at 1:26 AM, Ingo Molnar  wrote:
>
> * Thomas Garnier  wrote:
>
>> Change the assembly code to use only relative references to symbols, as
>> required for the kernel to be PIE compatible.
>>
>> Position Independent Executable (PIE) support will allow extending the
>> KASLR randomization range below the -2G memory limit.
>>
>> Signed-off-by: Thomas Garnier 
>> ---
>>  arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
>>  1 file changed, 15 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
>> index 49167258d587..15bd5530d2ae 100644
>> --- a/arch/x86/entry/entry_64.S
>> +++ b/arch/x86/entry/entry_64.S
>> @@ -194,12 +194,15 @@ entry_SYSCALL_64_fastpath:
>>  	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
>>  	movq	%r10, %rcx
>>
>> +	/* Ensures the call is position independent */
>> +	leaq	sys_call_table(%rip), %r11
>> +
>>  	/*
>>  	 * This call instruction is handled specially in stub_ptregs_64.
>>  	 * It might end up jumping to the slow path.  If it jumps, RAX
>>  	 * and all argument registers are clobbered.
>>  	 */
>> -	call	*sys_call_table(, %rax, 8)
>> +	call	*(%r11, %rax, 8)
>>  .Lentry_SYSCALL_64_after_fastpath_call:
>>
>>  	movq	%rax, RAX(%rsp)
>> @@ -334,7 +337,8 @@ ENTRY(stub_ptregs_64)
>>  	 * RAX stores a pointer to the C function implementing the syscall.
>>  	 * IRQs are on.
>>  	 */
>> -	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
>> +	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
>> +	cmpq	%r11, (%rsp)
>>  	jne	1f
>>
>>  	/*
>> @@ -1172,7 +1176,8 @@ ENTRY(error_entry)
>>  	movl	%ecx, %eax			/* zero extend */
>>  	cmpq	%rax, RIP+8(%rsp)
>>  	je	.Lbstep_iret
>> -	cmpq	$.Lgs_change, RIP+8(%rsp)
>> +	leaq	.Lgs_change(%rip), %rcx
>> +	cmpq	%rcx, RIP+8(%rsp)
>>  	jne	.Lerror_entry_done
>>
>>  	/*
>> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
>>  	 * resume the outer NMI.
>>  	 */
>>
>> -	movq	$repeat_nmi, %rdx
>> +	leaq	repeat_nmi(%rip), %rdx
>>  	cmpq	8(%rsp), %rdx
>>  	ja	1f
>> -	movq	$end_repeat_nmi, %rdx
>> +	leaq	end_repeat_nmi(%rip), %rdx
>>  	cmpq	8(%rsp), %rdx
>>  	ja	nested_nmi_out
>>  1:
>> @@ -1440,7 +1445,8 @@ nested_nmi:
>>  	pushq	%rdx
>>  	pushfq
>>  	pushq	$__KERNEL_CS
>> -	pushq	$repeat_nmi
>> +	leaq	repeat_nmi(%rip), %rdx
>> +	pushq	%rdx
>>
>>  	/* Put stack back */
>>  	addq	$(6*8), %rsp
>> @@ -1479,7 +1485,9 @@ first_nmi:
>>  	addq	$8, (%rsp)		/* Fix up RSP */
>>  	pushfq				/* RFLAGS */
>>  	pushq	$__KERNEL_CS		/* CS */
>> -	pushq	$1f			/* RIP */
>> +	pushq	%rax			/* Support Position Independent Code */
>> +	leaq	1f(%rip), %rax		/* RIP */
>> +	xchgq	%rax, (%rsp)		/* Restore RAX, put 1f */
>>  	INTERRUPT_RETURN		/* continues at repeat_nmi below */
>>  	UNWIND_HINT_IRET_REGS
>
> This patch seems to add extra overhead to the syscall fast-path even when
> PIE is disabled, right?

It does add extra instructions where a single position-independent
instruction is not possible; I preferred that over ifdef'ing, but I can
change it.
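
For example (sketch only, not part of the patch; CONFIG_X86_PIE is a
hypothetical option name), the fast-path call could keep the absolute form
in non-PIE builds:

#ifdef CONFIG_X86_PIE
	/* PIE: take the table address RIP-relative, then index it */
	leaq	sys_call_table(%rip), %r11
	call	*(%r11, %rax, 8)
#else
	/* non-PIE: single absolute-addressed call, no extra instruction */
	call	*sys_call_table(, %rax, 8)
#endif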

>
> Thanks,
>
> Ingo



-- 
Thomas


[PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-26 Thread Thomas Garnier via Virtualization
Change the assembly code to use only relative references to symbols, as
required for the kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below the -2G memory limit.

Signed-off-by: Thomas Garnier 
---
 arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 49167258d587..15bd5530d2ae 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -194,12 +194,15 @@ entry_SYSCALL_64_fastpath:
 	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
 
+	/* Ensures the call is position independent */
+	leaq	sys_call_table(%rip), %r11
+
 	/*
 	 * This call instruction is handled specially in stub_ptregs_64.
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
-	call	*sys_call_table(, %rax, 8)
+	call	*(%r11, %rax, 8)
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -334,7 +337,8 @@ ENTRY(stub_ptregs_64)
 	 * RAX stores a pointer to the C function implementing the syscall.
 	 * IRQs are on.
 	 */
-	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
+	cmpq	%r11, (%rsp)
 	jne	1f
 
 	/*
@@ -1172,7 +1176,8 @@ ENTRY(error_entry)
 	movl	%ecx, %eax			/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
-	cmpq	$.Lgs_change, RIP+8(%rsp)
+	leaq	.Lgs_change(%rip), %rcx
+	cmpq	%rcx, RIP+8(%rsp)
 	jne	.Lerror_entry_done
 
 	/*
@@ -1383,10 +1388,10 @@ ENTRY(nmi)
 	 * resume the outer NMI.
 	 */
 
-	movq	$repeat_nmi, %rdx
+	leaq	repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	1f
-	movq	$end_repeat_nmi, %rdx
+	leaq	end_repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	nested_nmi_out
 1:
@@ -1440,7 +1445,8 @@ nested_nmi:
 	pushq	%rdx
 	pushfq
 	pushq	$__KERNEL_CS
-	pushq	$repeat_nmi
+	leaq	repeat_nmi(%rip), %rdx
+	pushq	%rdx
 
 	/* Put stack back */
 	addq	$(6*8), %rsp
@@ -1479,7 +1485,9 @@ first_nmi:
 	addq	$8, (%rsp)		/* Fix up RSP */
 	pushfq				/* RFLAGS */
 	pushq	$__KERNEL_CS		/* CS */
-	pushq	$1f			/* RIP */
+	pushq	%rax			/* Support Position Independent Code */
+	leaq	1f(%rip), %rax		/* RIP */
+	xchgq	%rax, (%rsp)		/* Restore RAX, put 1f */
 	INTERRUPT_RETURN		/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
-- 
2.15.0.rc0.271.g36b669edcc-goog



Re: [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-20 Thread Andy Lutomirski


> On Oct 20, 2017, at 5:20 PM, Ingo Molnar  wrote:
> 
> 
> * Thomas Garnier  wrote:
> 
>>>>  	 */
>>>> -	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
>>>> +	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
>>>> +	cmpq	%r11, (%rsp)
>>>>  	jne	1f
> 
>>> This patch seems to add extra overhead to the syscall fast-path even
>>> when PIE is disabled, right?
>> 
>> It does add extra instructions where a single position-independent
>> instruction is not possible; I preferred that over ifdef'ing, but I can
>> change it.
> 
> So my problem is, this pattern repeats in many other places as well, but
> sprinkling various pieces of assembly code with #ifdefs would be very bad
> as well.
> 
> I have no good idea how to solve this.
> 
> Thanks,

Ugh, brain was off.  This is a bit messy. We could use a macro for this,
too, I suppose.

> 
>Ingo


Re: [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-20 Thread Andy Lutomirski


> On Oct 20, 2017, at 5:20 PM, Ingo Molnar  wrote:
> 
> 
> * Thomas Garnier  wrote:
> 
>>>>  	 */
>>>> -	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
>>>> +	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
>>>> +	cmpq	%r11, (%rsp)
>>>>  	jne	1f
> 
>>> This patch seems to add extra overhead to the syscall fast-path even
>>> when PIE is disabled, right?
>> 
>> It does add extra instructions where a single position-independent
>> instruction is not possible; I preferred that over ifdef'ing, but I can
>> change it.
> 
> So my problem is, this pattern repeats in many other places as well, but
> sprinkling various pieces of assembly code with #ifdefs would be very bad
> as well.
> 
> I have no good idea how to solve this.
> 

How about:

.macro JMP_TO_LABEL ...

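Or, for the address-taking pattern the patch actually touches, something
like this (untested sketch; the macro name and the CONFIG_X86_PIE option
are placeholders, not anything that exists today):

.macro LOAD_SYM_ADDR sym:req reg:req
#ifdef CONFIG_X86_PIE
	leaq	\sym(%rip), \reg	/* PIE: RIP-relative reference */
#else
	movq	$\sym, \reg		/* non-PIE: cheap absolute immediate */
#endif
.endm

Then e.g. the nested-NMI check becomes LOAD_SYM_ADDR repeat_nmi, %rdx and
non-PIE builds generate exactly the code they do today.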

> Thanks,
> 
>Ingo


Re: [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-20 Thread Ingo Molnar

* Thomas Garnier  wrote:

> >>  	 */
> >> -	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
> >> +	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
> >> +	cmpq	%r11, (%rsp)
> >>  	jne	1f

> > This patch seems to add extra overhead to the syscall fast-path even
> > when PIE is disabled, right?
> 
> It does add extra instructions where a single position-independent
> instruction is not possible; I preferred that over ifdef'ing, but I can
> change it.

So my problem is, this pattern repeats in many other places as well, but
sprinkling various pieces of assembly code with #ifdefs would be very bad
as well.

I have no good idea how to solve this.

Thanks,

Ingo


Re: [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support

2017-10-20 Thread Ingo Molnar

* Thomas Garnier  wrote:

> Change the assembly code to use only relative references to symbols, as
> required for the kernel to be PIE compatible.
> 
> Position Independent Executable (PIE) support will allow extending the
> KASLR randomization range below the -2G memory limit.
> 
> Signed-off-by: Thomas Garnier 
> ---
>  arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
>  1 file changed, 15 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 49167258d587..15bd5530d2ae 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -194,12 +194,15 @@ entry_SYSCALL_64_fastpath:
>  	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
>  	movq	%r10, %rcx
>  
> +	/* Ensures the call is position independent */
> +	leaq	sys_call_table(%rip), %r11
> +
>  	/*
>  	 * This call instruction is handled specially in stub_ptregs_64.
>  	 * It might end up jumping to the slow path.  If it jumps, RAX
>  	 * and all argument registers are clobbered.
>  	 */
> -	call	*sys_call_table(, %rax, 8)
> +	call	*(%r11, %rax, 8)
>  .Lentry_SYSCALL_64_after_fastpath_call:
>  
>  	movq	%rax, RAX(%rsp)
> @@ -334,7 +337,8 @@ ENTRY(stub_ptregs_64)
>  	 * RAX stores a pointer to the C function implementing the syscall.
>  	 * IRQs are on.
>  	 */
> -	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
> +	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
> +	cmpq	%r11, (%rsp)
>  	jne	1f
>  
>  	/*
> @@ -1172,7 +1176,8 @@ ENTRY(error_entry)
>  	movl	%ecx, %eax			/* zero extend */
>  	cmpq	%rax, RIP+8(%rsp)
>  	je	.Lbstep_iret
> -	cmpq	$.Lgs_change, RIP+8(%rsp)
> +	leaq	.Lgs_change(%rip), %rcx
> +	cmpq	%rcx, RIP+8(%rsp)
>  	jne	.Lerror_entry_done
>  
>  	/*
> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
>  	 * resume the outer NMI.
>  	 */
>  
> -	movq	$repeat_nmi, %rdx
> +	leaq	repeat_nmi(%rip), %rdx
>  	cmpq	8(%rsp), %rdx
>  	ja	1f
> -	movq	$end_repeat_nmi, %rdx
> +	leaq	end_repeat_nmi(%rip), %rdx
>  	cmpq	8(%rsp), %rdx
>  	ja	nested_nmi_out
>  1:
> @@ -1440,7 +1445,8 @@ nested_nmi:
>  	pushq	%rdx
>  	pushfq
>  	pushq	$__KERNEL_CS
> -	pushq	$repeat_nmi
> +	leaq	repeat_nmi(%rip), %rdx
> +	pushq	%rdx
>  
>  	/* Put stack back */
>  	addq	$(6*8), %rsp
> @@ -1479,7 +1485,9 @@ first_nmi:
>  	addq	$8, (%rsp)		/* Fix up RSP */
>  	pushfq				/* RFLAGS */
>  	pushq	$__KERNEL_CS		/* CS */
> -	pushq	$1f			/* RIP */
> +	pushq	%rax			/* Support Position Independent Code */
> +	leaq	1f(%rip), %rax		/* RIP */
> +	xchgq	%rax, (%rsp)		/* Restore RAX, put 1f */
>  	INTERRUPT_RETURN		/* continues at repeat_nmi below */
>  	UNWIND_HINT_IRET_REGS

This patch seems to add extra overhead to the syscall fast-path even when
PIE is disabled, right?

Thanks,

Ingo