Re: [PATCH v3 23/27] x86/modules: Adapt module loading for PIE support

2018-05-23 Thread Randy Dunlap
On 05/23/2018 03:01 PM, Thomas Garnier wrote:
> On Wed, May 23, 2018 at 2:27 PM Randy Dunlap  wrote:
> 
>> Hi,
> 
>> (for several patches in this series:)
>> The commit message is confusing.  See below.
> 
> Thanks for the edits, I will change the different commit messages.
> 
> 
> 
>> On 05/23/2018 12:54 PM, Thomas Garnier wrote:
>>> Adapt module loading to support PIE relocations. Generate dynamic GOT if
>>> a symbol requires it but no entry exist in the kernel GOT.
> 
>>  exists
> 
>>>
>>> Position Independent Executable (PIE) support will allow to extended the
> 
>>  will allow us to extend the
> 
>>> KASLR randomization range below the -2G memory limit.
> 
>> Does that say "below the negative 2G memory limit"?
>> I don't get it.
> 
> Yes, below 0xffffffff80000000 basically. I think I will just say that.

Yes, please, that's much better.

> 
> 
>>>
>>> Signed-off-by: Thomas Garnier 
>>> ---
>>>  arch/x86/Makefile   |   4 +
>>>  arch/x86/include/asm/module.h   |  11 ++
>>>  arch/x86/include/asm/sections.h |   4 +
>>>  arch/x86/kernel/module.c| 181 +++-
>>>  arch/x86/kernel/module.lds  |   3 +
>>>  5 files changed, 198 insertions(+), 5 deletions(-)
>>>  create mode 100644 arch/x86/kernel/module.lds
> 
> 
>> Thanks,
>> --
>> ~Randy
> 
> 
> 


-- 
~Randy


Re: [PATCH v3 23/27] x86/modules: Adapt module loading for PIE support

2018-05-23 Thread Thomas Garnier via Virtualization
On Wed, May 23, 2018 at 2:27 PM Randy Dunlap  wrote:

> Hi,

> (for several patches in this series:)
> The commit message is confusing.  See below.

Thanks for the edits, I will change the different commit messages.



> On 05/23/2018 12:54 PM, Thomas Garnier wrote:
> > Adapt module loading to support PIE relocations. Generate dynamic GOT if
> > a symbol requires it but no entry exist in the kernel GOT.

>  exists

> >
> > Position Independent Executable (PIE) support will allow to extended the

>  will allow us to extend the

> > KASLR randomization range below the -2G memory limit.

> Does that say "below the negative 2G memory limit"?
> I don't get it.

Yes, below 0xffffffff80000000 basically. I think I will just say that.

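A quick aside for readers outside the thread: "-2G" here is a sign-extended
64-bit address. A minimal userspace sketch (illustrative only, not part of
the patch) showing that -2G and 0xffffffff80000000 are the same value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* -2 GiB as a signed 64-bit value */
	int64_t minus_2g = -(INT64_C(1) << 31);

	/* Prints 0xffffffff80000000: the base of the kernel text mapping */
	printf("%#llx\n", (unsigned long long)minus_2g);
	return 0;
}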


> >
> > Signed-off-by: Thomas Garnier 
> > ---
> >  arch/x86/Makefile   |   4 +
> >  arch/x86/include/asm/module.h   |  11 ++
> >  arch/x86/include/asm/sections.h |   4 +
> >  arch/x86/kernel/module.c| 181 +++-
> >  arch/x86/kernel/module.lds  |   3 +
> >  5 files changed, 198 insertions(+), 5 deletions(-)
> >  create mode 100644 arch/x86/kernel/module.lds


> Thanks,
> --
> ~Randy



-- 
Thomas


Re: [PATCH v3 23/27] x86/modules: Adapt module loading for PIE support

2018-05-23 Thread Randy Dunlap
Hi,

(for several patches in this series:)
The commit message is confusing.  See below.


On 05/23/2018 12:54 PM, Thomas Garnier wrote:
> Adapt module loading to support PIE relocations. Generate dynamic GOT if
> a symbol requires it but no entry exist in the kernel GOT.

exists

> 
> Position Independent Executable (PIE) support will allow to extended the

will allow us to extend the

> KASLR randomization range below the -2G memory limit.

Does that say "below the negative 2G memory limit"?
I don't get it.


> 
> Signed-off-by: Thomas Garnier 
> ---
>  arch/x86/Makefile   |   4 +
>  arch/x86/include/asm/module.h   |  11 ++
>  arch/x86/include/asm/sections.h |   4 +
>  arch/x86/kernel/module.c| 181 +++-
>  arch/x86/kernel/module.lds  |   3 +
>  5 files changed, 198 insertions(+), 5 deletions(-)
>  create mode 100644 arch/x86/kernel/module.lds


Thanks,
-- 
~Randy


[PATCH v3 23/27] x86/modules: Adapt module loading for PIE support

2018-05-23 Thread Thomas Garnier via Virtualization
Adapt module loading to support PIE relocations. Generate dynamic GOT if
a symbol requires it but no entry exist in the kernel GOT.

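(Conceptually, a GOT is just an array of 64-bit absolute addresses that
position-independent code reads through RIP-relative slots. The kernel-GOT
reuse described above amounts to a linear scan, sketched below as
illustrative userspace C; the section-bound names are hypothetical:)

#include <stdint.h>
#include <stddef.h>

/* Hypothetical bounds of a GOT section: an array of absolute addresses. */
extern uint64_t got_start[], got_end[];

/*
 * Return the slot that already holds addr, or NULL so the caller knows
 * it must allocate a new module-local GOT entry instead.
 */
uint64_t *got_lookup(uint64_t addr)
{
	uint64_t *slot;

	for (slot = got_start; slot < got_end; slot++) {
		if (*slot == addr)
			return slot;
	}
	return NULL;
}
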
Position Independent Executable (PIE) support will allow to extended the
KASLR randomization range below the -2G memory limit.

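(Why -2G: -mcmodel=kernel lets the compiler embed sign-extended 32-bit
absolute addresses, which only reach the top 2G of the address space,
while PIE code reaches symbols RIP-relatively or through the GOT. An
illustrative sketch, reflecting typical gcc behavior rather than anything
in this patch, that can be compiled both ways to see the difference:)

/*
 * gcc -O2 -fno-pie -mcmodel=kernel -S demo.c
 *     -> movq $external_counter, %rax               (R_X86_64_32S, absolute)
 * gcc -O2 -fpie -S demo.c
 *     -> movq external_counter@GOTPCREL(%rip), %rax (GOT-indirect)
 */
extern int external_counter;	/* stand-in for an exported kernel symbol */

int *counter_address(void)
{
	return &external_counter;
}
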
Signed-off-by: Thomas Garnier 
---
 arch/x86/Makefile   |   4 +
 arch/x86/include/asm/module.h   |  11 ++
 arch/x86/include/asm/sections.h |   4 +
 arch/x86/kernel/module.c| 181 +++-
 arch/x86/kernel/module.lds  |   3 +
 5 files changed, 198 insertions(+), 5 deletions(-)
 create mode 100644 arch/x86/kernel/module.lds

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 277ffc57ae13..20bb6cbd8938 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -134,7 +134,11 @@ else
 KBUILD_CFLAGS += $(cflags-y)
 
 KBUILD_CFLAGS += -mno-red-zone
+ifdef CONFIG_X86_PIE
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/x86/kernel/module.lds
+else
 KBUILD_CFLAGS += -mcmodel=kernel
+endif
 
 # -funit-at-a-time shrinks the kernel .text considerably
 # unfortunately it makes reading oopses harder.
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..68ff05e14288 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,12 +5,23 @@
 #include <asm-generic/module.h>
 #include <asm/orc_types.h>
 
+#ifdef CONFIG_X86_PIE
+struct mod_got_sec {
+   struct elf64_shdr   *got;
+   int got_num_entries;
+   int got_max_entries;
+};
+#endif
+
 struct mod_arch_specific {
 #ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
 #endif
+#ifdef CONFIG_X86_PIE
+   struct mod_got_sec  core;
+#endif
 };
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index da3d98bb2bcb..89b3a95c8d11 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -17,4 +17,8 @@ extern char __entry_trampoline_start[], __entry_trampoline_end[];
 extern char __start_got[], __end_got[];
 #endif
 
+#if defined(CONFIG_X86_PIE)
+extern char __start_got[], __end_got[];
+#endif
+
 #endif /* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f58336af095c..88895f3d474b 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
 #include <linux/random.h>
+#include <linux/sort.h>
 
 #include <asm/text-patching.h>
 #include <asm/page.h>
@@ -77,6 +78,173 @@ static unsigned long int get_module_load_offset(void)
 }
 #endif
 
+#ifdef CONFIG_X86_PIE
+static u64 find_got_kernel_entry(Elf64_Sym *sym, const Elf64_Rela *rela)
+{
+   u64 *pos;
+
+   for (pos = (u64*)__start_got; pos < (u64*)__end_got; pos++) {
+   if (*pos == sym->st_value)
+   return (u64)pos + rela->r_addend;
+   }
+
+   return 0;
+}
+
+static u64 module_emit_got_entry(struct module *mod, void *loc,
+const Elf64_Rela *rela, Elf64_Sym *sym)
+{
+   struct mod_got_sec *gotsec = &mod->arch.core;
+   u64 *got = (u64*)gotsec->got->sh_addr;
+   int i = gotsec->got_num_entries;
+   u64 ret;
+
+   /* Check if we can use the kernel GOT */
+   ret = find_got_kernel_entry(sym, rela);
+   if (ret)
+   return ret;
+
+   got[i] = sym->st_value;
+
+   /*
+* Check if the entry we just created is a duplicate. Given that the
+* relocations are sorted, this will be the last entry we allocated.
+* (if one exists).
+*/
+   if (i > 0 && got[i] == got[i - 1]) {
+   ret = (u64)&got[i - 1];
+   } else {
+   gotsec->got_num_entries++;
+   BUG_ON(gotsec->got_num_entries > gotsec->got_max_entries);
+   ret = (u64)&got[i];
+   }
+
+   return ret + rela->r_addend;
+}
+
+#define cmp_3way(a,b)  ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+   const Elf64_Rela *x = a, *y = b;
+   int i;
+
+   /* sort by type, symbol index and addend */
+   i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+   if (i == 0)
+   i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+   if (i == 0)
+   i = cmp_3way(x->r_addend, y->r_addend);
+   return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+   /*
+* Entries are sorted by type, symbol index and addend. That means
+* that, if a duplicate entry exists, it must be in the preceding
+* slot.
+*/
+   return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_gots(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+{
+   unsigned int ret = 0;
+   Elf64_Sym *s;
+   int i;
+
+   for (i = 0; i < num; i++) {
+   switch