Author: zbyniu                       Date: Sat Sep  8 15:59:29 2007 GMT
Module: SOURCES                       Tag: LINUX_2_6
---- Log message:
- merged changes from grsecurity-2.1.11-2.6.22.6-200709071800.patch

---- Files affected:
SOURCES:
   linux-2.6-grsec_full.patch (1.1.2.19 -> 1.1.2.20) 

---- Diffs:

================================================================
Index: SOURCES/linux-2.6-grsec_full.patch
diff -u SOURCES/linux-2.6-grsec_full.patch:1.1.2.19 SOURCES/linux-2.6-grsec_full.patch:1.1.2.20
--- SOURCES/linux-2.6-grsec_full.patch:1.1.2.19 Fri Aug 31 10:20:35 2007
+++ SOURCES/linux-2.6-grsec_full.patch  Sat Sep  8 17:59:23 2007
@@ -3255,7 +3255,7 @@
  asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
                          unsigned long prot, unsigned long flags,
                          unsigned long fd, unsigned long pgoff)
-@@ -99,6 +114,197 @@ out:
+@@ -99,6 +114,205 @@ out:
        return err;
  }
  
@@ -3339,6 +3339,7 @@
 +              addr = vma->vm_end;
 +              if (mm->start_brk <= addr && addr < mm->mmap_base) {
 +                      start_addr = addr = mm->mmap_base;
++                      mm->cached_hole_size = 0;
 +                      goto full_search;
 +              }
 +      }
@@ -3431,6 +3432,13 @@
 +       * can happen with large stack limits and large mmap()
 +       * allocations.
 +       */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++       if (mm->pax_flags & MF_PAX_SEGMEXEC)
++               mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++       else
++#endif
++
 +      mm->mmap_base = TASK_UNMAPPED_BASE;
 +
 +#ifdef CONFIG_PAX_RANDMMAP
@@ -6473,7 +6481,7 @@
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;
-@@ -325,22 +329,13 @@ try_again:
+@@ -325,22 +329,26 @@ try_again:
  
  fail:
        /*
@@ -6493,8 +6501,21 @@
         * allocations.
         */
 -      mm->free_area_cache = TASK_UNMAPPED_BASE;
-+      mm->mmap_base = TASK_UNMAPPED_BASE;
-+      mm->free_area_cache = mm->mmap_base;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++       if (mm->pax_flags & MF_PAX_SEGMEXEC)
++               mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++       else
++#endif
++
++       mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++       if (mm->pax_flags & MF_PAX_RANDMMAP)
++               mm->mmap_base += mm->delta_mmap;
++#endif
++
++       mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);
@@ -6945,9 +6966,17 @@
  }
  
  /*
-@@ -67,10 +73,22 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -66,11 +72,30 @@ void arch_pick_mmap_layout(struct mm_str
+       if (sysctl_legacy_va_layout ||
                        (current->personality & ADDR_COMPAT_LAYOUT) ||
                        current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++               if (mm->pax_flags & MF_PAX_SEGMEXEC)
++                       mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++               else
++#endif
++
                mm->mmap_base = TASK_UNMAPPED_BASE;
 +
 +#ifdef CONFIG_PAX_RANDMMAP
@@ -13503,9 +13532,9 @@
        /* no need for flush_tlb */
 -      return;
 +      return 0;
-+out:
-+      __free_page(page);
-+      force_sig(SIGKILL, current);
+ out:
+       __free_page(page);
+       force_sig(SIGKILL, current);
 +      return -ENOMEM;
 +}
 +
@@ -13532,11 +13561,11 @@
 +      /* no need for flush_tlb */
 +      unlock_page(page);
 +      return 0;
- out:
++out:
 +      unlock_page(page);
 +      page_cache_release(page);
-       __free_page(page);
-       force_sig(SIGKILL, current);
++      __free_page(page);
++      force_sig(SIGKILL, current);
 +      return -ENOMEM;
  }
  
@@ -13574,19 +13603,11 @@
                if ((ret = insert_vm_struct(mm, mpnt))) {
                        up_write(&mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, mpnt);
-@@ -444,17 +498,38 @@ int setup_arg_pages(struct linux_binprm
+@@ -444,17 +498,30 @@ int setup_arg_pages(struct linux_binprm
                mm->stack_vm = mm->total_vm;
        }
  
 -      for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-+#ifdef CONFIG_PAX_SEGMEXEC
-+              mpnt_m = pax_find_mirror_vma(mpnt);
-+              if (mpnt_m) {
-+                      mm->stack_vm += vma_pages(mpnt);
-+                      mm->total_vm += vma_pages(mpnt);
-+              }
-+#endif
-+
 +      for (i = 0 ; i < MAX_ARG_PAGES ; i++, stack_base += PAGE_SIZE) {
                struct page *page = bprm->page[i];
 -              if (page) {
@@ -23599,7 +23620,7 @@
 diff -urNp linux-2.6.22.1/grsecurity/Kconfig linux-2.6.22.1/grsecurity/Kconfig
 --- linux-2.6.22.1/grsecurity/Kconfig  1969-12-31 19:00:00.000000000 -0500
 +++ linux-2.6.22.1/grsecurity/Kconfig  2007-08-02 11:09:16.000000000 -0400
-@@ -0,0 +1,872 @@
+@@ -0,0 +1,873 @@
 +#
 +# grecurity configuration
 +#
@@ -23745,7 +23766,8 @@
 +      select PAX_EI_PAX
 +      select PAX_PT_PAX_FLAGS
 +      select PAX_HAVE_ACL_FLAGS
-+      select PAX_KERNEXEC if (!X86_64 && !MODULES && !HOTPLUG_PCI_COMPAQ_NVRAM && !PCI_BIOS)
++      select PAX_KERNEXEC if (!X86_64 && !EFI && !COMPAT_VDSO && !PARAVIRT && X86_WP_WORKS_OK)
++      select PAX_MEMORY_UDEREF if (!X86_64 && !COMPAT_VDSO)
 +      select PAX_RANDKSTACK if (X86_TSC && !X86_64)
 +      select PAX_SEGMEXEC if (X86 && !X86_64)
 +      select PAX_PAGEEXEC if (!X86)
@@ -25600,7 +25622,7 @@
  
  #ifdef CONFIG_SMP
  extern struct cpuinfo_x86 cpu_data[];
-@@ -220,10 +218,19 @@ extern int bootloader_type;
+@@ -220,11 +218,19 @@ extern int bootloader_type;
   */
  #define TASK_SIZE     (PAGE_OFFSET)
  
@@ -25611,15 +25633,15 @@
  /* This decides where the kernel will search for a free chunk of vm
   * space during mmap's.
   */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define TASK_UNMAPPED_BASE    (PAGE_ALIGN((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3 : TASK_SIZE/3))
-+#else
  #define TASK_UNMAPPED_BASE    (PAGE_ALIGN(TASK_SIZE / 3))
-+#endif
  
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE   (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
  #define HAVE_ARCH_PICK_MMAP_LAYOUT
  
+ /*
 @@ -345,6 +352,9 @@ struct tss_struct {
  
  #define ARCH_MIN_TASKALIGN    16
@@ -29161,15 +29183,6 @@
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
-@@ -340,7 +370,7 @@ static struct mm_struct * mm_init(struct
-       spin_lock_init(&mm->page_table_lock);
-       rwlock_init(&mm->ioctx_list_lock);
-       mm->ioctx_list = NULL;
--      mm->free_area_cache = TASK_UNMAPPED_BASE;
-+      mm->free_area_cache = ~0UL;
-       mm->cached_hole_size = ~0UL;
- 
-       if (likely(!mm_alloc_pgd(mm))) {
 @@ -461,7 +491,7 @@ void mm_release(struct task_struct *tsk,
        if (tsk->clear_child_tid
            && !(tsk->flags & PF_SIGNALED)
@@ -31244,44 +31257,7 @@
  
  int can_do_mlock(void)
  {
-@@ -31,6 +32,10 @@ static int mlock_fixup(struct vm_area_st
-       int pages;
-       int ret = 0;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+      struct vm_area_struct *vma_m;
-+#endif
-+
-       if (newflags == vma->vm_flags) {
-               *prev = vma;
-               goto out;
-@@ -64,6 +69,13 @@ success:
-        * It's okay if try_to_unmap_one unmaps a page just after we
-        * set VM_LOCKED, make_pages_present below will bring it back.
-        */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+      vma_m = pax_find_mirror_vma(vma);
-+      if (vma_m)
-+              vma_m->vm_flags = newflags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
-+#endif
-+
-       vma->vm_flags = newflags;
- 
-       /*
-@@ -77,6 +89,11 @@ success:
-                       ret = make_pages_present(start, end);
-       }
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+      if (vma_m)
-+              pages *= 2;
-+#endif
-+
-       vx_vmlocked_sub(mm, pages);
- out:
-       if (ret == -ENOMEM)
-@@ -95,6 +112,17 @@ static int do_mlock(unsigned long start,
+@@ -95,6 +96,17 @@ static int do_mlock(unsigned long start,
                return -EINVAL;
        if (end == start)
                return 0;
@@ -31320,10 +31296,15 @@
        current->mm->def_flags = def_flags;
        if (flags == MCL_FUTURE)
                goto out;
-@@ -184,6 +213,7 @@ static int do_mlockall(int flags)
+@@ -184,6 +197,12 @@ static int do_mlockall(int flags)
        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                unsigned int newflags;
  
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++                      break;
++#endif
++
 +              BUG_ON(vma->vm_end > TASK_SIZE);
                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
@@ -31871,16 +31852,16 @@
 +              BUG_ON(vma->vm_mirror);
 +              return NULL;
 +      }
-+      BUG_ON(vma->vm_end - SEGMEXEC_TASK_SIZE < vma->vm_start - SEGMEXEC_TASK_SIZE);
++      BUG_ON(vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < vma->vm_start - SEGMEXEC_TASK_SIZE - 1);
 +      vma_m = vma->vm_mirror;
 +      BUG_ON(!vma_m || vma_m->vm_mirror != vma);
 +      BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
 +      BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
 +
 +#ifdef CONFIG_PAX_MPROTECT
-+      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_MAYNOTWRITE));
++      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_MAYNOTWRITE));
 +#else
-+      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT));
++      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
 +#endif
 +
 +      return vma_m;
@@ -31987,6 +31968,17 @@
  #endif
  
  /*
+@@ -1662,6 +1827,10 @@ static void remove_vma_list(struct mm_st
+       do {
+               long nrpages = vma_pages(vma);
+ 
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_end <= SEGMEXEC_TASK_SIZE))
++#endif
++
+               vx_vmpages_sub(mm, nrpages);
+               if (vma->vm_flags & VM_LOCKED)
+                       vx_vmlocked_sub(mm, nrpages);
 @@ -1708,6 +1869,15 @@ detach_vmas_to_be_unmapped(struct mm_str
  
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
@@ -32347,11 +32339,11 @@
 +      struct rb_node **rb_link_m, *rb_parent_m;
 +
 +      BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
-+      BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++      BUG_ON(vma->vm_mirror || vma_m->vm_mirror || vma_policy(vma));
 +      *vma_m = *vma;
 +      vma_m->vm_start += SEGMEXEC_TASK_SIZE;
 +      vma_m->vm_end += SEGMEXEC_TASK_SIZE;
-+      vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++      vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
 +      vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
 +      if (vma_m->vm_file)
 +              get_file(vma_m->vm_file);
@@ -32404,18 +32396,6 @@
  
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
-@@ -2170,5 +2559,11 @@ int install_special_mapping(struct mm_st
-       }
- 
-       vx_vmpages_add(mm, len >> PAGE_SHIFT);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+      if (pax_find_mirror_vma(vma))
-+              mm->total_vm += len >> PAGE_SHIFT;
-+#endif
-+
-       return 0;
- }
 diff -urNp linux-2.6.22.1/mm/mprotect.c linux-2.6.22.1/mm/mprotect.c
 --- linux-2.6.22.1/mm/mprotect.c       2007-07-10 14:56:30.000000000 -0400
 +++ linux-2.6.22.1/mm/mprotect.c       2007-08-02 11:41:53.000000000 -0400
================================================================
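---- Note:
The recurring change in the merged hunks is the mmap search-base selection: under MF_PAX_SEGMEXEC the base becomes SEGMEXEC_TASK_UNMAPPED_BASE (newly defined as PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) instead of TASK_UNMAPPED_BASE, and the RANDMMAP delta is applied afterwards. Below is a minimal userspace sketch of that selection logic only; the struct, constants, flag values and function name are mock stand-ins, not kernel code (the real changes are in arch_pick_mmap_layout() and the hugetlb/mmap fallback paths shown above).

/*
 * Illustrative userspace model of the mmap base selection added by the
 * merged hunks.  All names, constants and flag bits are mock stand-ins;
 * PAGE_ALIGN is omitted because the mock values are already page-aligned.
 */
#include <stdio.h>

#define MF_PAX_SEGMEXEC  0x01UL  /* mock flag bit */
#define MF_PAX_RANDMMAP  0x02UL  /* mock flag bit */

#define TASK_SIZE                   0xc0000000UL      /* i386 3G/1G split */
#define SEGMEXEC_TASK_SIZE          (TASK_SIZE / 2)   /* SEGMEXEC halves it */
#define TASK_UNMAPPED_BASE          (TASK_SIZE / 3)
#define SEGMEXEC_TASK_UNMAPPED_BASE (SEGMEXEC_TASK_SIZE / 3)

struct mock_mm {
	unsigned long pax_flags;
	unsigned long delta_mmap;       /* RANDMMAP randomization delta */
	unsigned long mmap_base;
	unsigned long free_area_cache;
};

/* Same shape as the hunks that replace "mm->mmap_base = TASK_UNMAPPED_BASE". */
static void pick_mmap_base(struct mock_mm *mm)
{
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
	else
		mm->mmap_base = TASK_UNMAPPED_BASE;

	if (mm->pax_flags & MF_PAX_RANDMMAP)
		mm->mmap_base += mm->delta_mmap;

	mm->free_area_cache = mm->mmap_base;
}

int main(void)
{
	struct mock_mm segmexec = { .pax_flags = MF_PAX_SEGMEXEC | MF_PAX_RANDMMAP,
				    .delta_mmap = 0x00400000UL };
	struct mock_mm plain = { 0 };

	pick_mmap_base(&segmexec);
	pick_mmap_base(&plain);
	printf("SEGMEXEC base: %#lx\n", segmexec.mmap_base);  /* 0x20000000 + delta */
	printf("default  base: %#lx\n", plain.mmap_base);     /* 0x40000000 */
	return 0;
}

The patch repeats the same #ifdef CONFIG_PAX_SEGMEXEC / MF_PAX_SEGMEXEC check in arch_pick_mmap_layout(), the hugetlb fail path and the i386 mmap fallback, so every bottom-up search restarts below SEGMEXEC_TASK_SIZE rather than in the mirrored upper half.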

---- CVS-web:
    
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/SOURCES/linux-2.6-grsec_full.patch?r1=1.1.2.19&r2=1.1.2.20&f=u

_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
