Author: arekm                        Date: Tue Dec 21 20:58:30 2010 GMT
Module: packages                      Tag: HEAD
---- Log message:
- rel 2; update vserver (patch-2.6.36.2-vs2.3.0.36.38.2.diff) and grsecurity (grsecurity-2.2.1-2.6.36.2-201012192125.patch) patches
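  Note on the grsecurity refresh below: the 32-bit PAX_MEMORY_UDEREF user-access
  helpers in arch/x86 are reworked so that, instead of reloading %ds around every
  access (the old _ASM_LOAD_USER_DS/_ASM_LOAD_KERNEL_DS dance), user-space operands
  get a "__copyuser_seg" segment-override prefix ("%%gs:" when CONFIG_X86_32 and
  CONFIG_PAX_MEMORY_UDEREF are set) pasted into the inline-asm templates, and user
  pointers are wrapped in ____m(). A minimal, compile-only C sketch of the
  string-pasting mechanism follows; DEMO_UDEREF and the printf are hypothetical
  stand-ins for the kernel config option and asm statements, not code from the patch:

	/*
	 * Illustration only: how a __copyuser_seg-style macro splices a segment
	 * override into an asm template via C string-literal concatenation.
	 */
	#include <stdio.h>

	#ifdef DEMO_UDEREF
	# define __copyuser_seg "%%gs:"	/* user accesses go through %gs */
	#else
	# define __copyuser_seg		/* flat address space: no override */
	#endif

	int main(void)
	{
		/* "movl " __copyuser_seg "%1,%0" pastes to either
		 * "movl %%gs:%1,%0" or "movl %1,%0" at compile time. */
		printf("template: \"%s\"\n", "movl " __copyuser_seg "%1,%0");
		return 0;
	}

  Building with -DDEMO_UDEREF prints the prefixed template, without it the plain one;
  the real patch makes the same choice per-config instead of per-build flag.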

---- Files affected:
packages/kernel:
   kernel-grsec_full.patch (1.53 -> 1.54), kernel-vserver-2.3.patch (1.51 -> 1.52),
   kernel-vserver-fixes.patch (1.18 -> 1.19), kernel.spec (1.864 -> 1.865)

---- Diffs:

================================================================
Index: packages/kernel/kernel-grsec_full.patch
diff -u packages/kernel/kernel-grsec_full.patch:1.53 packages/kernel/kernel-grsec_full.patch:1.54
--- packages/kernel/kernel-grsec_full.patch:1.53        Fri Dec 10 09:36:27 2010
+++ packages/kernel/kernel-grsec_full.patch     Tue Dec 21 21:58:24 2010
@@ -7531,46 +7531,11 @@
  #endif /* _ASM_X86_ELF_H */
 diff -urNp linux-2.6.36.2/arch/x86/include/asm/futex.h linux-2.6.36.2/arch/x86/include/asm/futex.h
 --- linux-2.6.36.2/arch/x86/include/asm/futex.h        2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36.2/arch/x86/include/asm/futex.h        2010-12-09 20:24:53.000000000 -0500
-@@ -11,17 +11,54 @@
- #include <asm/processor.h>
++++ linux-2.6.36.2/arch/x86/include/asm/futex.h        2010-12-19 12:46:43.000000000 -0500
+@@ -12,16 +12,18 @@
  #include <asm/system.h>
  
-+#ifdef CONFIG_X86_32
  #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)   \
-+      asm volatile(                                           \
-+                   "movw\t%w6, %%ds\n"                        \
-+                   "1:\t" insn "\n"                           \
-+                   "2:\tpushl\t%%ss\n"                        \
-+                   "\tpopl\t%%ds\n"                           \
-+                   "\t.section .fixup,\"ax\"\n"               \
-+                   "3:\tmov\t%3, %1\n"                        \
-+                   "\tjmp\t2b\n"                              \
-+                   "\t.previous\n"                            \
-+                   _ASM_EXTABLE(1b, 3b)                       \
-+                   : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+                   : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
-+
-+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)   \
-+      asm volatile("movw\t%w7, %%es\n"                        \
-+                   "1:\tmovl\t%%es:%2, %0\n"                  \
-+                   "\tmovl\t%0, %3\n"                         \
-+                   "\t" insn "\n"                             \
-+                   "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
-+                   "\tjnz\t1b\n"                              \
-+                   "3:\tpushl\t%%ss\n"                        \
-+                   "\tpopl\t%%es\n"                           \
-+                   "\t.section .fixup,\"ax\"\n"               \
-+                   "4:\tmov\t%5, %1\n"                        \
-+                   "\tjmp\t3b\n"                              \
-+                   "\t.previous\n"                            \
-+                   _ASM_EXTABLE(1b, 4b)                       \
-+                   _ASM_EXTABLE(2b, 4b)                       \
-+                   : "=&a" (oldval), "=&r" (ret),             \
-+                     "+m" (*uaddr), "=&r" (tem)               \
-+                   : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
-+#else
-+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)   \
 +      typecheck(u32 *, uaddr);                                \
        asm volatile("1:\t" insn "\n"                           \
                     "2:\t.section .fixup,\"ax\"\n"             \
@@ -7579,8 +7544,7 @@
                     "\t.previous\n"                            \
                     _ASM_EXTABLE(1b, 3b)                       \
 -                   : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+                   : "=r" (oldval), "=r" (ret),               \
-+                     "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\
++                   : "=r" (oldval), "=r" (ret), "+m" (*____m(uaddr))\
                     : "i" (-EFAULT), "0" (oparg), "1" (0))
  
  #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)   \
@@ -7588,43 +7552,33 @@
        asm volatile("1:\tmovl  %2, %0\n"                       \
                     "\tmovl\t%0, %3\n"                         \
                     "\t" insn "\n"                             \
-@@ -34,10 +71,12 @@
+@@ -34,10 +36,10 @@
                     _ASM_EXTABLE(1b, 4b)                       \
                     _ASM_EXTABLE(2b, 4b)                       \
                     : "=&a" (oldval), "=&r" (ret),             \
 -                     "+m" (*uaddr), "=&r" (tem)               \
-+                     "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\
-+                     "=&r" (tem)                              \
++                     "+m" (*(____m(uaddr))), "=&r" (tem)      \
                     : "r" (oparg), "i" (-EFAULT), "1" (0))
-+#endif
  
 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
  {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
-@@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
  
        switch (op) {
        case FUTEX_OP_SET:
-+#ifdef CONFIG_X86_32
-+              __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
-+#else
-               __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-+#endif
+-              __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++              __futex_atomic_op1("xchgl %0, "__copyuser_seg"%2", ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
-+#ifdef CONFIG_X86_32
-+              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
-+                                 uaddr, oparg);
-+#else
-               __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+-              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, "__copyuser_seg"%2", ret, oldval,
                                   uaddr, oparg);
-+#endif
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
-@@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser
+@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
        return ret;
  }
  
@@ -7633,7 +7587,7 @@
                                                int newval)
  {
  
-@@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_i
+@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
                return -ENOSYS;
  #endif
  
@@ -7642,32 +7596,17 @@
                return -EFAULT;
  
 -      asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
--                   "2:\t.section .fixup, \"ax\"\n"
-+      asm volatile(
-+#ifdef CONFIG_X86_32
-+                   "\tmovw %w5, %%ds\n"
-+                   "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n"
-+                   "2:\tpushl   %%ss\n"
-+                   "\tpopl    %%ds\n"
-+#else
-+                   "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
-+                   "2:\n"
-+#endif
-+                   "\t.section .fixup, \"ax\"\n"
++      asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, "__copyuser_seg"%1\n"
+                    "2:\t.section .fixup, \"ax\"\n"
                     "3:\tmov     %2, %0\n"
                     "\tjmp     2b\n"
                     "\t.previous\n"
                     _ASM_EXTABLE(1b, 3b)
-+#ifdef CONFIG_X86_32
-                    : "=a" (oldval), "+m" (*uaddr)
-+                   : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
-+#else
-+                   : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))
+-                   : "=a" (oldval), "+m" (*uaddr)
++                   : "=a" (oldval), "+m" (*____m(uaddr))
                     : "i" (-EFAULT), "r" (newval), "0" (oldval)
-+#endif
                     : "memory"
        );
- 
 diff -urNp linux-2.6.36.2/arch/x86/include/asm/i387.h linux-2.6.36.2/arch/x86/include/asm/i387.h
 --- linux-2.6.36.2/arch/x86/include/asm/i387.h 2010-10-20 16:30:22.000000000 -0400
 +++ linux-2.6.36.2/arch/x86/include/asm/i387.h 2010-12-09 20:24:53.000000000 -0500
@@ -9419,6 +9358,18 @@
                     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
  }
  
+diff -urNp linux-2.6.36.2/arch/x86/include/asm/stackprotector.h linux-2.6.36.2/arch/x86/include/asm/stackprotector.h
+--- linux-2.6.36.2/arch/x86/include/asm/stackprotector.h       2010-10-20 16:30:22.000000000 -0400
++++ linux-2.6.36.2/arch/x86/include/asm/stackprotector.h       2010-12-19 12:46:50.000000000 -0500
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
+ 
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
 diff -urNp linux-2.6.36.2/arch/x86/include/asm/system.h linux-2.6.36.2/arch/x86/include/asm/system.h
 --- linux-2.6.36.2/arch/x86/include/asm/system.h       2010-10-20 16:30:22.000000000 -0400
 +++ linux-2.6.36.2/arch/x86/include/asm/system.h       2010-12-09 20:24:53.000000000 -0500
@@ -9936,7 +9887,7 @@
  #endif /* _ASM_X86_UACCESS_64_H */
 diff -urNp linux-2.6.36.2/arch/x86/include/asm/uaccess.h linux-2.6.36.2/arch/x86/include/asm/uaccess.h
 --- linux-2.6.36.2/arch/x86/include/asm/uaccess.h      2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36.2/arch/x86/include/asm/uaccess.h      2010-12-09 20:24:53.000000000 -0500
++++ linux-2.6.36.2/arch/x86/include/asm/uaccess.h      2010-12-19 12:46:43.000000000 -0500
 @@ -8,12 +8,15 @@
  #include <linux/thread_info.h>
  #include <linux/prefetch.h>
@@ -9953,12 +9904,11 @@
  /*
   * The fs value determines whether argument validity checking should be
   * performed or not.  If get_fs() == USER_DS, checking is performed, with
-@@ -29,7 +32,12 @@
+@@ -29,7 +32,11 @@
  
  #define get_ds()      (KERNEL_DS)
  #define get_fs()      (current_thread_info()->addr_limit)
-+#ifdef CONFIG_X86_32
-+void __set_fs(mm_segment_t x, int cpu);
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
 +void set_fs(mm_segment_t x);
 +#else
  #define set_fs(x)     (current_thread_info()->addr_limit = (x))
@@ -9966,7 +9916,7 @@
  
  #define segment_eq(a, b)      ((a).seg == (b).seg)
  
-@@ -77,7 +85,33 @@
+@@ -77,7 +84,33 @@
   * checks that the pointer is in the user space range - after calling
   * this function, memory access functions may still return -EFAULT.
   */
@@ -10001,92 +9951,69 @@
  
  /*
   * The exception table consists of pairs of addresses: the first is the
-@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
+@@ -183,12 +216,20 @@ extern int __get_user_bad(void);
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
  
 -
-+#ifdef CONFIG_X86_32
-+#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
-+#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
-+#else
-+#define _ASM_LOAD_USER_DS(ds)
-+#define _ASM_LOAD_KERNEL_DS
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "%%gs:"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
 +#endif
  
  #ifdef CONFIG_X86_32
  #define __put_user_asm_u64(x, addr, err, errret)                      \
 -      asm volatile("1:        movl %%eax,0(%2)\n"                     \
 -                   "2:        movl %%edx,4(%2)\n"                     \
-+      asm volatile(_ASM_LOAD_USER_DS(5)                               \
-+                   "1:        movl %%eax,%%ds:0(%2)\n"                \
-+                   "2:        movl %%edx,%%ds:4(%2)\n"                \
++      asm volatile("1:        movl %%eax," __copyuser_seg"0(%2)\n"    \
++                   "2:        movl %%edx," __copyuser_seg"4(%2)\n"    \
                     "3:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
-                    "  jmp 3b\n"                                       \
-@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
-                    _ASM_EXTABLE(1b, 4b)                               \
-                    _ASM_EXTABLE(2b, 4b)                               \
-                    : "=r" (err)                                       \
--                   : "A" (x), "r" (addr), "i" (errret), "0" (err))
-+                   : "A" (x), "r" (addr), "i" (errret), "0" (err),    \
-+                     "r"(__USER_DS))
+@@ -200,8 +241,8 @@ extern int __get_user_bad(void);
+                    : "A" (x), "r" (addr), "i" (errret), "0" (err))
  
 #define __put_user_asm_ex_u64(x, addr)                                        \
 -      asm volatile("1:        movl %%eax,0(%1)\n"                     \
 -                   "2:        movl %%edx,4(%1)\n"                     \
-+      asm volatile(_ASM_LOAD_USER_DS(2)                               \
-+                   "1:        movl %%eax,%%ds:0(%1)\n"                \
-+                   "2:        movl %%edx,%%ds:4(%1)\n"                \
++      asm volatile("1:        movl %%eax," __copyuser_seg"0(%1)\n"    \
++                   "2:        movl %%edx," __copyuser_seg"4(%1)\n"    \
                     "3:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     _ASM_EXTABLE(1b, 2b - 1b)                          \
                     _ASM_EXTABLE(2b, 3b - 2b)                          \
--                   : : "A" (x), "r" (addr))
-+                   : : "A" (x), "r" (addr), "r"(__USER_DS))
- 
- #define __put_user_x8(x, ptr, __ret_pu)                               \
-       asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
-@@ -374,16 +419,18 @@ do {                                                                  \
+@@ -374,7 +415,7 @@ do {                                                                  \
  } while (0)
  
  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
 -      asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
-+      asm volatile(_ASM_LOAD_USER_DS(5)                               \
-+                   "1:        mov"itype" %%ds:%2,%"rtype"1\n"         \
++      asm volatile("1:        mov"itype" "__copyuser_seg"%2,%"rtype"1\n"\
                     "2:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
-                    "  xor"itype" %"rtype"1,%"rtype"1\n"               \
+@@ -382,7 +423,7 @@ do {                                                                  \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
 -                   : "=r" (err), ltype(x)                             \
--                   : "m" (__m(addr)), "i" (errret), "0" (err))
 +                   : "=r" (err), ltype (x)                            \
-+                   : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
+                    : "m" (__m(addr)), "i" (errret), "0" (err))
  
  #define __get_user_size_ex(x, ptr, size)                              \
- do {                                                                  \
-@@ -407,10 +454,12 @@ do {                                                                  \
+@@ -407,7 +448,7 @@ do {                                                                  \
  } while (0)
  
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                       \
 -      asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
-+      asm volatile(_ASM_LOAD_USER_DS(2)                               \
-+                   "1:        mov"itype" %%ds:%1,%"rtype"0\n"         \
++      asm volatile("1:        mov"itype" "__copyuser_seg"%1,%"rtype"0\n"\
                     "2:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     _ASM_EXTABLE(1b, 2b - 1b)                          \
--                   : ltype(x) : "m" (__m(addr)))
-+                   : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
- 
- #define __put_user_nocheck(x, ptr, size)                      \
- ({                                                            \
-@@ -424,13 +473,24 @@ do {                                                                  \
+                    : ltype(x) : "m" (__m(addr)))
+@@ -424,13 +465,24 @@ do {                                                                  \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
@@ -10113,38 +10040,29 @@
  
  /*
   * Tell gcc we read from memory instead of writing: this is because
-@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
+@@ -438,7 +490,7 @@ struct __large_struct { unsigned long bu
   * aliasing issues.
   */
  #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
 -      asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
-+      asm volatile(_ASM_LOAD_USER_DS(5)                               \
-+                   "1:        mov"itype" %"rtype"1,%%ds:%2\n"         \
++      asm volatile("1:        mov"itype" %"rtype"1," __copyuser_seg"%2\n"\
                     "2:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
-                    "  jmp 2b\n"                                       \
+@@ -446,10 +498,10 @@ struct __large_struct { unsigned long bu
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
 -                   : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
-+                   : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
-+                     "r"(__USER_DS))
++                   : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
  
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype)                       \
 -      asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
-+      asm volatile(_ASM_LOAD_USER_DS(2)                               \
-+                   "1:        mov"itype" %"rtype"0,%%ds:%1\n"         \
++      asm volatile("1:        mov"itype" %"rtype"0," __copyuser_seg"%1\n"\
                     "2:\n"                                             \
-+                   _ASM_LOAD_KERNEL_DS                                \
                     _ASM_EXTABLE(1b, 2b - 1b)                          \
--                   : : ltype(x), "m" (__m(addr)))
-+                   : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
- 
- /*
-  * uaccess_try and catch
-@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
+                    : : ltype(x), "m" (__m(addr)))
+@@ -530,7 +582,7 @@ struct __large_struct { unsigned long bu
  #define get_user_ex(x, ptr)   do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
@@ -10153,7 +10071,7 @@
  } while (0)
  
  #ifdef CONFIG_X86_WP_WORKS_OK
-@@ -567,6 +632,7 @@ extern struct movsl_mask {
+@@ -567,6 +619,7 @@ extern struct movsl_mask {
  
  #define ARCH_HAS_NOCACHE_UACCESS 1
  
@@ -10245,7 +10163,16 @@
                             ".section .fixup,\"ax\"\n"
 diff -urNp linux-2.6.36.2/arch/x86/Kconfig linux-2.6.36.2/arch/x86/Kconfig
 --- linux-2.6.36.2/arch/x86/Kconfig    2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36.2/arch/x86/Kconfig    2010-12-09 20:24:54.000000000 -0500
++++ linux-2.6.36.2/arch/x86/Kconfig    2010-12-19 12:46:43.000000000 -0500
+@@ -236,7 +236,7 @@ config X86_TRAMPOLINE
+ 
+ config X86_32_LAZY_GS
+       def_bool y
+-      depends on X86_32 && !CC_STACKPROTECTOR
++      depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+ 
+ config ARCH_HWEIGHT_CFLAGS
+       string
 @@ -1036,7 +1036,7 @@ choice
  
  config NOHIGHMEM
@@ -10282,7 +10209,15 @@
        ---help---
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
-@@ -1546,6 +1546,7 @@ config KEXEC_JUMP
+@@ -1489,6 +1489,7 @@ config SECCOMP
+ 
+ config CC_STACKPROTECTOR
+       bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++      depends on X86_64 || !PAX_MEMORY_UDEREF
+       ---help---
+         This option turns on the -fstack-protector GCC feature. This
+         feature puts, at the beginning of functions, a canary value on
+@@ -1546,6 +1547,7 @@ config KEXEC_JUMP
  config PHYSICAL_START
        hex "Physical address where the kernel is loaded" if (EMBEDDED || 
CRASH_DUMP)
        default "0x1000000"
@@ -10290,7 +10225,7 @@
        ---help---
          This gives the physical address where the kernel is loaded.
  
-@@ -1609,6 +1610,7 @@ config X86_NEED_RELOCS
+@@ -1609,6 +1611,7 @@ config X86_NEED_RELOCS
  config PHYSICAL_ALIGN
        hex "Alignment value to which kernel should be aligned" if X86_32
        default "0x1000000"
@@ -10298,7 +10233,7 @@
        range 0x2000 0x1000000
        ---help---
          This value puts the alignment restrictions on physical address
-@@ -1640,9 +1642,10 @@ config HOTPLUG_CPU
+@@ -1640,9 +1643,10 @@ config HOTPLUG_CPU
          Say N if you want to disable CPU hotplug.
  
  config COMPAT_VDSO
@@ -10719,7 +10654,7 @@
        DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
 diff -urNp linux-2.6.36.2/arch/x86/kernel/cpu/common.c linux-2.6.36.2/arch/x86/kernel/cpu/common.c
 --- linux-2.6.36.2/arch/x86/kernel/cpu/common.c        2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36.2/arch/x86/kernel/cpu/common.c        2010-12-09 20:24:55.000000000 -0500
++++ linux-2.6.36.2/arch/x86/kernel/cpu/common.c        2010-12-19 12:46:43.000000000 -0500
 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
  
  static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
@@ -10801,6 +10736,15 @@
        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
+@@ -1080,7 +1030,7 @@ struct pt_regs * __cpuinit idle_regs(str
+ {
+       memset(regs, 0, sizeof(struct pt_regs));
+       regs->fs = __KERNEL_PERCPU;
+-      regs->gs = __KERNEL_STACK_CANARY;
++      savesegment(gs, regs->gs);
+ 
+       return regs;
+ }
 @@ -1135,7 +1085,7 @@ void __cpuinit cpu_init(void)
        int i;
  
@@ -11371,8 +11315,22 @@
  efi_rt_function_ptr:
 diff -urNp linux-2.6.36.2/arch/x86/kernel/entry_32.S linux-2.6.36.2/arch/x86/kernel/entry_32.S
 --- linux-2.6.36.2/arch/x86/kernel/entry_32.S  2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36.2/arch/x86/kernel/entry_32.S  2010-12-09 20:24:54.000000000 -0500
++++ linux-2.6.36.2/arch/x86/kernel/entry_32.S  2010-12-19 12:47:27.000000000 -0500
-0500
+@@ -186,13 +186,81 @@
+       /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+       movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      movl $(__USER_DS), \reg
++#else
++      xorl \reg, \reg
++#endif
++
+       movl \reg, %gs
+ .endm
  
  #endif        /* CONFIG_X86_32_LAZY_GS */
  
@@ -11441,7 +11399,7 @@
        cld
        PUSH_GS
        pushl %fs
-@@ -225,7 +285,7 @@
+@@ -225,7 +293,7 @@
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
@@ -11450,7 +11408,7 @@
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
-@@ -233,6 +293,15 @@
+@@ -233,6 +301,15 @@
        SET_KERNEL_GS %edx
  .endm
  
@@ -11466,7 +11424,7 @@
  .macro RESTORE_INT_REGS
        popl %ebx
        CFI_ADJUST_CFA_OFFSET -4
-@@ -357,7 +426,15 @@ check_userspace:
+@@ -357,7 +434,15 @@ check_userspace:
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
        cmpl $USER_RPL, %eax
@@ -11482,7 +11440,7 @@
  
  ENTRY(resume_userspace)
        LOCKDEP_SYS_EXIT
-@@ -423,10 +500,9 @@ sysenter_past_esp:
+@@ -423,10 +508,9 @@ sysenter_past_esp:
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
@@ -11495,7 +11453,7 @@
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
  
-@@ -439,9 +515,19 @@ sysenter_past_esp:
+@@ -439,9 +523,19 @@ sysenter_past_esp:
   * Load the potential sixth argument from user stack.
   * Careful about security.
   */
@@ -11515,7 +11473,7 @@
        movl %ebp,PT_EBP(%esp)
  .section __ex_table,"a"
        .align 4
-@@ -464,12 +550,23 @@ sysenter_do_call:
+@@ -464,12 +558,23 @@ sysenter_do_call:
        testl $_TIF_ALLWORK_MASK, %ecx
        jne sysexit_audit
  sysenter_exit:
@@ -11539,7 +11497,7 @@
        PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT
  
-@@ -513,11 +610,17 @@ sysexit_audit:
+@@ -513,11 +618,17 @@ sysexit_audit:
  
        CFI_ENDPROC
  .pushsection .fixup,"ax"
@@ -11559,7 +11517,7 @@
  .popsection
        PTGS_TO_GS_EX
  ENDPROC(ia32_sysenter_target)
-@@ -551,6 +654,10 @@ syscall_exit:
+@@ -551,6 +662,10 @@ syscall_exit:
        testl $_TIF_ALLWORK_MASK, %ecx  # current->work
        jne syscall_exit_work
  
@@ -11570,7 +11528,7 @@
  restore_all:
        TRACE_IRQS_IRET
  restore_all_notrace:
-@@ -611,14 +718,21 @@ ldt_ss:
+@@ -611,14 +726,21 @@ ldt_ss:
   * compensating for the offset by changing to the ESPFIX segment with
   * a base address that matches for the difference.
   */
@@ -11595,7 +11553,7 @@
        pushl $__ESPFIX_SS
        CFI_ADJUST_CFA_OFFSET 4
        push %eax                       /* new kernel esp */
-@@ -655,25 +769,19 @@ work_resched:
+@@ -655,25 +777,19 @@ work_resched:
  
  work_notifysig:                               # deal with pending signals and
                                        # notify-resume requests
@@ -11624,7 +11582,7 @@
  #endif
        xorl %edx, %edx
        call do_notify_resume
-@@ -708,6 +816,10 @@ END(syscall_exit_work)
+@@ -708,6 +824,10 @@ END(syscall_exit_work)
  
        RING0_INT_FRAME                 # can't unwind into user space anyway
  syscall_fault:
@@ -11635,7 +11593,39 @@
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,PT_EAX(%esp)
        jmp resume_userspace
-@@ -791,8 +903,15 @@ ptregs_clone:
+@@ -782,6 +902,31 @@ ptregs_clone:
+       addl $8,%esp
+       ret
+ 
++      ALIGN;
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-grsec_full.patch?r1=1.53&r2=1.54&f=u
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-vserver-2.3.patch?r1=1.51&r2=1.52&f=u
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-vserver-fixes.patch?r1=1.18&r2=1.19&f=u
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel.spec?r1=1.864&r2=1.865&f=u

_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
