[tip:x86/vdso] x86, vdso: Move the vvar area before the vdso text

2014-07-11 Thread tip-bot for Andy Lutomirski
Commit-ID:  e6577a7ce99a506b587bcd1d2cd803cb45119557
Gitweb: http://git.kernel.org/tip/e6577a7ce99a506b587bcd1d2cd803cb45119557
Author: Andy Lutomirski <luto@amacapital.net>
AuthorDate: Thu, 10 Jul 2014 18:13:15 -0700
Committer:  H. Peter Anvin <hpa@linux.intel.com>
CommitDate: Fri, 11 Jul 2014 16:57:51 -0700

x86, vdso: Move the vvar area before the vdso text

Putting the vvar area after the vdso text is rather complicated: it
only works if the total length of the vdso text mapping is known at
vdso link time, and the linker doesn't allow symbol addresses to
depend on the sizes of non-allocatable data after the PT_LOAD
segment.

Moving the vvar area before the vdso text will allow us to safely
map non-allocatable data after the vdso text, which is a nice
simplification.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: 
http://lkml.kernel.org/r/156c78c0d93144ff1055a66493783b9e56813983.1405040914.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/include/asm/vdso.h | 18 -
 arch/x86/vdso/vdso-layout.lds.S | 44 ++---
 arch/x86/vdso/vdso2c.c  | 12 ++-
 arch/x86/vdso/vdso2c.h  | 25 ++-
 arch/x86/vdso/vma.c | 20 ++-
 5 files changed, 62 insertions(+), 57 deletions(-)

diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 30be253..8021bd2 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -18,15 +18,15 @@ struct vdso_image {
 
unsigned long alt, alt_len;
 
-   unsigned long sym_end_mapping;  /* Total size of the mapping */
-
-   unsigned long sym_vvar_page;
-   unsigned long sym_hpet_page;
-   unsigned long sym_VDSO32_NOTE_MASK;
-   unsigned long sym___kernel_sigreturn;
-   unsigned long sym___kernel_rt_sigreturn;
-   unsigned long sym___kernel_vsyscall;
-   unsigned long sym_VDSO32_SYSENTER_RETURN;
+   long sym_vvar_start;  /* Negative offset to the vvar area */
+
+   long sym_vvar_page;
+   long sym_hpet_page;
+   long sym_VDSO32_NOTE_MASK;
+   long sym___kernel_sigreturn;
+   long sym___kernel_rt_sigreturn;
+   long sym___kernel_vsyscall;
+   long sym_VDSO32_SYSENTER_RETURN;
 };
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 9197544..de2c921 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -18,6 +18,25 @@
 
 SECTIONS
 {
+   /*
+* User/kernel shared data is before the vDSO.  This may be a little
+* uglier than putting it after the vDSO, but it avoids issues with
+* non-allocatable things that dangle past the end of the PT_LOAD
+* segment.
+*/
+
+   vvar_start = . - 2 * PAGE_SIZE;
+   vvar_page = vvar_start;
+
+   /* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+#undef EMIT_VVAR
+
+   hpet_page = vvar_start + PAGE_SIZE;
+
. = SIZEOF_HEADERS;
 
.hash   : { *(.hash) }  :text
@@ -74,31 +93,6 @@ SECTIONS
.altinstructions: { *(.altinstructions) }   :text
.altinstr_replacement   : { *(.altinstr_replacement) }  :text
 
-   /*
-* The remainder of the vDSO consists of special pages that are
-* shared between the kernel and userspace.  It needs to be at the
-* end so that it doesn't overlap the mapping of the actual
-* vDSO image.
-*/
-
-   . = ALIGN(PAGE_SIZE);
-   vvar_page = .;
-
-   /* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
-#undef EMIT_VVAR
-
-   . = vvar_page + PAGE_SIZE;
-
-   hpet_page = .;
-   . = . + PAGE_SIZE;
-
-   . = ALIGN(PAGE_SIZE);
-   end_mapping = .;
-
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 238dbe82..22c54d0 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -20,9 +20,9 @@ const char *outfilename;
 
 /* Symbols that we need in vdso2c. */
 enum {
+   sym_vvar_start,
sym_vvar_page,
sym_hpet_page,
-   sym_end_mapping,
sym_VDSO_FAKE_SECTION_TABLE_START,
sym_VDSO_FAKE_SECTION_TABLE_END,
 };
@@ -38,9 +38,9 @@ struct vdso_sym {
 };
 
 struct vdso_sym required_syms[] = {
+   [sym_vvar_start] = {"vvar_start", true},
[sym_vvar_page] = {"vvar_page", true},
[sym_hpet_page] = {"hpet_page", true},
-   [sym_end_mapping] = {"end_mapping", true},
[sym_VDSO_FAKE_SECTION_TABLE_START] = {
"VDSO_FAKE_SECTION_TABLE_START", false
},
@@ -96,9 +96,11 @@ extern void bad_put_le(void);
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))

[tip:x86/vdso] x86, vdso: Move the vvar area before the vdso text

2014-07-11 Thread tip-bot for Andy Lutomirski
Commit-ID:  e6577a7ce99a506b587bcd1d2cd803cb45119557
Gitweb: http://git.kernel.org/tip/e6577a7ce99a506b587bcd1d2cd803cb45119557
Author: Andy Lutomirski <luto@amacapital.net>
AuthorDate: Thu, 10 Jul 2014 18:13:15 -0700
Committer:  H. Peter Anvin <hpa@linux.intel.com>
CommitDate: Fri, 11 Jul 2014 16:57:51 -0700

x86, vdso: Move the vvar area before the vdso text

Putting the vvar area after the vdso text is rather complicated: it
only works if the total length of the vdso text mapping is known at
vdso link time, and the linker doesn't allow symbol addresses to
depend on the sizes of non-allocatable data after the PT_LOAD
segment.

Moving the vvar area before the vdso text will allow us to safely
map non-allocatable data after the vdso text, which is a nice
simplification.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: 
http://lkml.kernel.org/r/156c78c0d93144ff1055a66493783b9e56813983.1405040914.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/include/asm/vdso.h | 18 -
 arch/x86/vdso/vdso-layout.lds.S | 44 ++---
 arch/x86/vdso/vdso2c.c  | 12 ++-
 arch/x86/vdso/vdso2c.h  | 25 ++-
 arch/x86/vdso/vma.c | 20 ++-
 5 files changed, 62 insertions(+), 57 deletions(-)

diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 30be253..8021bd2 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -18,15 +18,15 @@ struct vdso_image {
 
unsigned long alt, alt_len;
 
-   unsigned long sym_end_mapping;  /* Total size of the mapping */
-
-   unsigned long sym_vvar_page;
-   unsigned long sym_hpet_page;
-   unsigned long sym_VDSO32_NOTE_MASK;
-   unsigned long sym___kernel_sigreturn;
-   unsigned long sym___kernel_rt_sigreturn;
-   unsigned long sym___kernel_vsyscall;
-   unsigned long sym_VDSO32_SYSENTER_RETURN;
+   long sym_vvar_start;  /* Negative offset to the vvar area */
+
+   long sym_vvar_page;
+   long sym_hpet_page;
+   long sym_VDSO32_NOTE_MASK;
+   long sym___kernel_sigreturn;
+   long sym___kernel_rt_sigreturn;
+   long sym___kernel_vsyscall;
+   long sym_VDSO32_SYSENTER_RETURN;
 };
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 9197544..de2c921 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -18,6 +18,25 @@
 
 SECTIONS
 {
+   /*
+* User/kernel shared data is before the vDSO.  This may be a little
+* uglier than putting it after the vDSO, but it avoids issues with
+* non-allocatable things that dangle past the end of the PT_LOAD
+* segment.
+*/
+
+   vvar_start = . - 2 * PAGE_SIZE;
+   vvar_page = vvar_start;
+
+   /* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+#undef EMIT_VVAR
+
+   hpet_page = vvar_start + PAGE_SIZE;
+
. = SIZEOF_HEADERS;
 
.hash   : { *(.hash) }  :text
@@ -74,31 +93,6 @@ SECTIONS
.altinstructions: { *(.altinstructions) }   :text
.altinstr_replacement   : { *(.altinstr_replacement) }  :text
 
-   /*
-* The remainder of the vDSO consists of special pages that are
-* shared between the kernel and userspace.  It needs to be at the
-* end so that it doesn't overlap the mapping of the actual
-* vDSO image.
-*/
-
-   . = ALIGN(PAGE_SIZE);
-   vvar_page = .;
-
-   /* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
-#undef EMIT_VVAR
-
-   . = vvar_page + PAGE_SIZE;
-
-   hpet_page = .;
-   . = . + PAGE_SIZE;
-
-   . = ALIGN(PAGE_SIZE);
-   end_mapping = .;
-
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 238dbe82..22c54d0 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -20,9 +20,9 @@ const char *outfilename;
 
 /* Symbols that we need in vdso2c. */
 enum {
+   sym_vvar_start,
sym_vvar_page,
sym_hpet_page,
-   sym_end_mapping,
sym_VDSO_FAKE_SECTION_TABLE_START,
sym_VDSO_FAKE_SECTION_TABLE_END,
 };
@@ -38,9 +38,9 @@ struct vdso_sym {
 };
 
 struct vdso_sym required_syms[] = {
+   [sym_vvar_start] = {"vvar_start", true},
	[sym_vvar_page] = {"vvar_page", true},
	[sym_hpet_page] = {"hpet_page", true},
-   [sym_end_mapping] = {"end_mapping", true},
[sym_VDSO_FAKE_SECTION_TABLE_START] = {
		"VDSO_FAKE_SECTION_TABLE_START", false