commit:     df5765ccf2fcc59e11b068e559e0528356afe44f
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 15 18:56:10 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Jun 15 18:56:10 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=df5765cc

grsecurity-3.1-4.5.7-201606142010

 4.5.7/0000_README                                  |    2 +-
 ...> 4420_grsecurity-3.1-4.5.7-201606142010.patch} | 1324 ++++++++++++++++----
 2 files changed, 1056 insertions(+), 270 deletions(-)

diff --git a/4.5.7/0000_README b/4.5.7/0000_README
index 67f12a7..7dd453b 100644
--- a/4.5.7/0000_README
+++ b/4.5.7/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.5.7-201606080852.patch
+Patch: 4420_grsecurity-3.1-4.5.7-201606142010.patch
 From:  http://www.grsecurity.net
 Desc:  hardened-sources base patch from upstream grsecurity
 

diff --git a/4.5.7/4420_grsecurity-3.1-4.5.7-201606080852.patch b/4.5.7/4420_grsecurity-3.1-4.5.7-201606142010.patch
similarity index 99%
rename from 4.5.7/4420_grsecurity-3.1-4.5.7-201606080852.patch
rename to 4.5.7/4420_grsecurity-3.1-4.5.7-201606142010.patch
index 65f5e28..b46e7cf 100644
--- a/4.5.7/4420_grsecurity-3.1-4.5.7-201606080852.patch
+++ b/4.5.7/4420_grsecurity-3.1-4.5.7-201606142010.patch
@@ -3631,6 +3631,68 @@ index 549f6d3..909a9dc 100644
        default y if ARM_ARCH_TIMER
        select GENERIC_TIME_VSYSCALL
        help
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 7d5f4c7..c6a0816 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -778,6 +778,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       u16 tinstr = 0;
+       int isize = 4;
+       int thumb2_32b = 0;
++      bool is_user_mode = user_mode(regs);
+ 
+       if (interrupts_enabled(regs))
+               local_irq_enable();
+@@ -786,14 +787,24 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 
+       if (thumb_mode(regs)) {
+               u16 *ptr = (u16 *)(instrptr & ~1);
+-              fault = probe_kernel_address(ptr, tinstr);
++              if (is_user_mode) {
++                      pax_open_userland();
++                      fault = probe_kernel_address(ptr, tinstr);
++                      pax_close_userland();
++              } else
++                      fault = probe_kernel_address(ptr, tinstr);
+               tinstr = __mem_to_opcode_thumb16(tinstr);
+               if (!fault) {
+                       if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+                           IS_T32(tinstr)) {
+                               /* Thumb-2 32-bit */
+                               u16 tinst2 = 0;
+-                              fault = probe_kernel_address(ptr + 1, tinst2);
++                              if (is_user_mode) {
++                                      pax_open_userland();
++                                      fault = probe_kernel_address(ptr + 1, tinst2);
++                                      pax_close_userland();
++                              } else
++                                      fault = probe_kernel_address(ptr + 1, tinst2);
+                               tinst2 = __mem_to_opcode_thumb16(tinst2);
+                               instr = __opcode_thumb32_compose(tinstr, tinst2);
+                               thumb2_32b = 1;
+@@ -803,7 +814,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+                       }
+               }
+       } else {
+-              fault = probe_kernel_address((void *)instrptr, instr);
++              if (is_user_mode) {
++                      pax_open_userland();
++                      fault = probe_kernel_address((void *)instrptr, instr);
++                      pax_close_userland();
++              } else
++                      fault = probe_kernel_address((void *)instrptr, instr);
+               instr = __mem_to_opcode_arm(instr);
+       }
+ 
+@@ -812,7 +828,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+               goto bad_or_fault;
+       }
+ 
+-      if (user_mode(regs))
++      if (is_user_mode)
+               goto user;
+ 
+       ai_sys += 1;
 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
 index 9f9d542..5189649 100644
 --- a/arch/arm/mm/cache-l2x0.c
@@ -97446,6 +97508,123 @@ index 8580831..36166e5 100644
        retval = sysfs_create_mount_point(kernel_kobj, "debug");
        if (retval)
                return retval;
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index feef8a9..f024040 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
+               .sb = inode->i_sb,
+       };
+       lower_file = ecryptfs_file_to_lower(file);
+-      lower_file->f_pos = ctx->pos;
+       rc = iterate_dir(lower_file, &buf.ctx);
+       ctx->pos = buf.ctx.pos;
+       if (rc < 0)
+@@ -223,14 +222,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+       }
+       ecryptfs_set_file_lower(
+               file, ecryptfs_inode_to_private(inode)->lower_file);
+-      if (d_is_dir(ecryptfs_dentry)) {
+-              ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
+-              mutex_lock(&crypt_stat->cs_mutex);
+-              crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+-              mutex_unlock(&crypt_stat->cs_mutex);
+-              rc = 0;
+-              goto out;
+-      }
+       rc = read_or_initialize_metadata(ecryptfs_dentry);
+       if (rc)
+               goto out_put;
+@@ -247,6 +238,45 @@ out:
+       return rc;
+ }
+ 
++/**
++ * ecryptfs_dir_open
++ * @inode: inode specifying file to open
++ * @file: Structure to return filled in
++ *
++ * Opens the file specified by inode.
++ *
++ * Returns zero on success; non-zero otherwise
++ */
++static int ecryptfs_dir_open(struct inode *inode, struct file *file)
++{
++      struct dentry *ecryptfs_dentry = file->f_path.dentry;
++      /* Private value of ecryptfs_dentry allocated in
++       * ecryptfs_lookup() */
++      struct ecryptfs_file_info *file_info;
++      struct file *lower_file;
++
++      /* Released in ecryptfs_release or end of function if failure */
++      file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
++      ecryptfs_set_file_private(file, file_info);
++      if (unlikely(!file_info)) {
++              ecryptfs_printk(KERN_ERR,
++                              "Error attempting to allocate memory\n");
++              return -ENOMEM;
++      }
++      lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
++                               file->f_flags, current_cred());
++      if (IS_ERR(lower_file)) {
++              printk(KERN_ERR "%s: Error attempting to initialize "
++                      "the lower file for the dentry with name "
++                      "[%pd]; rc = [%ld]\n", __func__,
++                      ecryptfs_dentry, PTR_ERR(lower_file));
++              kmem_cache_free(ecryptfs_file_info_cache, file_info);
++              return PTR_ERR(lower_file);
++      }
++      ecryptfs_set_file_lower(file, lower_file);
++      return 0;
++}
++
+ static int ecryptfs_flush(struct file *file, fl_owner_t td)
+ {
+       struct file *lower_file = ecryptfs_file_to_lower(file);
+@@ -267,6 +297,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
+       return 0;
+ }
+ 
++static int ecryptfs_dir_release(struct inode *inode, struct file *file)
++{
++      fput(ecryptfs_file_to_lower(file));
++      kmem_cache_free(ecryptfs_file_info_cache,
++                      ecryptfs_file_to_private(file));
++      return 0;
++}
++
++static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++      return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
++}
++
+ static int
+ ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+@@ -346,20 +389,16 @@ const struct file_operations ecryptfs_dir_fops = {
+ #ifdef CONFIG_COMPAT
+       .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+-      .open = ecryptfs_open,
+-      .flush = ecryptfs_flush,
+-      .release = ecryptfs_release,
++      .open = ecryptfs_dir_open,
++      .release = ecryptfs_dir_release,
+       .fsync = ecryptfs_fsync,
+-      .fasync = ecryptfs_fasync,
+-      .splice_read = generic_file_splice_read,
+-      .llseek = default_llseek,
++      .llseek = ecryptfs_dir_llseek,
+ };
+ 
+ const struct file_operations ecryptfs_main_fops = {
+       .llseek = generic_file_llseek,
+       .read_iter = ecryptfs_read_update_atime,
+       .write_iter = generic_file_write_iter,
+-      .iterate = ecryptfs_readdir,
+       .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+ #ifdef CONFIG_COMPAT
+       .compat_ioctl = ecryptfs_compat_ioctl,
 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
 index 4e685ac..462fc20 100644
 --- a/fs/ecryptfs/inode.c
@@ -97483,6 +97662,45 @@ index 6bd67e2..1d71a4b 100644
        }
        s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 866bb18..e818f5a 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -25,6 +25,7 @@
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/mount.h>
++#include <linux/file.h>
+ #include "ecryptfs_kernel.h"
+ 
+ struct ecryptfs_open_req {
+@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+       flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
+       (*lower_file) = dentry_open(&req.path, flags, cred);
+       if (!IS_ERR(*lower_file))
+-              goto out;
++              goto have_file;
+       if ((flags & O_ACCMODE) == O_RDONLY) {
+               rc = PTR_ERR((*lower_file));
+               goto out;
+@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
+       mutex_unlock(&ecryptfs_kthread_ctl.mux);
+       wake_up(&ecryptfs_kthread_ctl.wait);
+       wait_for_completion(&req.done);
+-      if (IS_ERR(*lower_file))
++      if (IS_ERR(*lower_file)) {
+               rc = PTR_ERR(*lower_file);
++              goto out;
++      }
++have_file:
++      if ((*lower_file)->f_op->mmap == NULL) {
++              fput(*lower_file);
++              *lower_file = NULL;
++              rc = -EMEDIUMTYPE;
++      }
+ out:
+       return rc;
+ }
 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
 index e4141f2..d8263e8 100644
 --- a/fs/ecryptfs/miscdev.c
@@ -97497,7 +97715,7 @@ index e4141f2..d8263e8 100644
                i += packet_length_size;
                if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
 diff --git a/fs/exec.c b/fs/exec.c
-index dcd4ac7..b1bb7fa 100644
+index dcd4ac7..50eef0a 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -56,8 +56,20 @@
@@ -97835,15 +98053,7 @@ index dcd4ac7..b1bb7fa 100644
        set_fs(old_fs);
        return result;
  }
-@@ -869,6 +959,7 @@ static int exec_mmap(struct mm_struct *mm)
-       tsk->mm = mm;
-       tsk->active_mm = mm;
-       activate_mm(active_mm, mm);
-+      populate_stack();
-       tsk->mm->vmacache_seqnum = 0;
-       vmacache_flush(tsk);
-       task_unlock(tsk);
-@@ -1277,7 +1368,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1277,7 +1367,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        }
        rcu_read_unlock();
  
@@ -97852,7 +98062,7 @@ index dcd4ac7..b1bb7fa 100644
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
-@@ -1478,6 +1569,31 @@ static int exec_binprm(struct linux_binprm *bprm)
+@@ -1478,6 +1568,31 @@ static int exec_binprm(struct linux_binprm *bprm)
        return ret;
  }
  
@@ -97884,7 +98094,7 @@ index dcd4ac7..b1bb7fa 100644
  /*
   * sys_execve() executes a new program.
   */
-@@ -1486,6 +1602,11 @@ static int do_execveat_common(int fd, struct filename *filename,
+@@ -1486,6 +1601,11 @@ static int do_execveat_common(int fd, struct filename *filename,
                              struct user_arg_ptr envp,
                              int flags)
  {
@@ -97896,7 +98106,7 @@ index dcd4ac7..b1bb7fa 100644
        char *pathbuf = NULL;
        struct linux_binprm *bprm;
        struct file *file;
-@@ -1495,6 +1616,8 @@ static int do_execveat_common(int fd, struct filename *filename,
+@@ -1495,6 +1615,8 @@ static int do_execveat_common(int fd, struct filename *filename,
        if (IS_ERR(filename))
                return PTR_ERR(filename);
  
@@ -97905,7 +98115,7 @@ index dcd4ac7..b1bb7fa 100644
        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
-@@ -1558,6 +1681,11 @@ static int do_execveat_common(int fd, struct filename *filename,
+@@ -1558,6 +1680,11 @@ static int do_execveat_common(int fd, struct filename *filename,
        }
        bprm->interp = bprm->filename;
  
@@ -97917,7 +98127,7 @@ index dcd4ac7..b1bb7fa 100644
        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_unmark;
-@@ -1574,24 +1702,70 @@ static int do_execveat_common(int fd, struct filename *filename,
+@@ -1574,24 +1701,70 @@ static int do_execveat_common(int fd, struct filename *filename,
        if (retval < 0)
                goto out;
  
@@ -97992,7 +98202,7 @@ index dcd4ac7..b1bb7fa 100644
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
-@@ -1603,6 +1777,14 @@ static int do_execveat_common(int fd, struct filename *filename,
+@@ -1603,6 +1776,14 @@ static int do_execveat_common(int fd, struct filename *filename,
                put_files_struct(displaced);
        return retval;
  
@@ -98007,7 +98217,7 @@ index dcd4ac7..b1bb7fa 100644
  out:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
-@@ -1749,3 +1931,319 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
+@@ -1749,3 +1930,319 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                                  argv, envp, flags);
  }
  #endif
@@ -98163,7 +98373,7 @@ index dcd4ac7..b1bb7fa 100644
 +
 +#ifdef CONFIG_PAX_USERCOPY
 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
-+static noinline int check_stack_object(const void *obj, unsigned long len)
++static noinline int check_stack_object(unsigned long obj, unsigned long len)
 +{
 +      const void * const stack = task_stack_page(current);
 +      const void * const stackend = stack + THREAD_SIZE;
@@ -98176,10 +98386,10 @@ index dcd4ac7..b1bb7fa 100644
 +      if (obj + len < obj)
 +              return -1;
 +
-+      if (obj + len <= stack || stackend <= obj)
++      if (obj + len <= (unsigned long)stack || (unsigned long)stackend <= obj)
 +              return 0;
 +
-+      if (obj < stack || stackend < obj + len)
++      if (obj < (unsigned long)stack || (unsigned long)stackend < obj + len)
 +              return -1;
 +
 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
@@ -98198,8 +98408,8 @@ index dcd4ac7..b1bb7fa 100644
 +                 causing us to bail out and correctly report
 +                 the copy as invalid
 +              */
-+              if (obj + len <= frame)
-+                      return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
++              if (obj + len <= (unsigned long)frame)
++                      return obj >= (unsigned long)oldframe + 2 * sizeof(void *) ? 2 : -1;
 +              oldframe = frame;
 +              frame = *(const void * const *)frame;
 +      }
@@ -98280,7 +98490,7 @@ index dcd4ac7..b1bb7fa 100644
 +
 +      type = check_heap_object(ptr, n);
 +      if (!type) {
-+              int ret = check_stack_object(ptr, n);
++              int ret = check_stack_object((unsigned long)ptr, n);
 +              if (ret == 1 || ret == 2)
 +                      return;
 +              if (ret == 0) {
@@ -113366,10 +113576,24 @@ index fe5b6e6..cd2913c 100644
                        kfree(ctl_table_arg);
                        goto out;
 diff --git a/fs/proc/root.c b/fs/proc/root.c
-index 361ab4e..9720b97 100644
+index 361ab4e..55e45e9 100644
 --- a/fs/proc/root.c
 +++ b/fs/proc/root.c
-@@ -187,7 +187,15 @@ void __init proc_root_init(void)
+@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
+       if (IS_ERR(sb))
+               return ERR_CAST(sb);
+ 
++      /*
++       * procfs isn't actually a stacking filesystem; however, there is
++       * too much magic going on inside it to permit stacking things on
++       * top of it
++       */
++      sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
++
+       if (!proc_parse_options(options, ns)) {
+               deactivate_locked_super(sb);
+               return ERR_PTR(-EINVAL);
+@@ -187,7 +194,15 @@ void __init proc_root_init(void)
        proc_create_mount_point("openprom");
  #endif
        proc_tty_init();
@@ -115023,6 +115247,157 @@ index 642d55d..d8ccf82 100644
                            xfs_dir3_get_dtype(dp->i_mount, filetype)))
                        return 0;
                sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index d7a490f..59a8848 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -91,13 +91,6 @@ xfs_inode_free_callback(
+       struct inode            *inode = container_of(head, struct inode, i_rcu);
+       struct xfs_inode        *ip = XFS_I(inode);
+ 
+-      kmem_zone_free(xfs_inode_zone, ip);
+-}
+-
+-void
+-xfs_inode_free(
+-      struct xfs_inode        *ip)
+-{
+       switch (ip->i_d.di_mode & S_IFMT) {
+       case S_IFREG:
+       case S_IFDIR:
+@@ -115,6 +108,25 @@ xfs_inode_free(
+               ip->i_itemp = NULL;
+       }
+ 
++      kmem_zone_free(xfs_inode_zone, ip);
++}
++
++static void
++__xfs_inode_free(
++      struct xfs_inode        *ip)
++{
++      /* asserts to verify all state is correct here */
++      ASSERT(atomic_read(&ip->i_pincount) == 0);
++      ASSERT(!xfs_isiflocked(ip));
++      XFS_STATS_DEC(ip->i_mount, vn_active);
++
++      call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
++}
++
++void
++xfs_inode_free(
++      struct xfs_inode        *ip)
++{
+       /*
+        * Because we use RCU freeing we need to ensure the inode always
+        * appears to be reclaimed with an invalid inode number when in the
+@@ -126,12 +138,7 @@ xfs_inode_free(
+       ip->i_ino = 0;
+       spin_unlock(&ip->i_flags_lock);
+ 
+-      /* asserts to verify all state is correct here */
+-      ASSERT(atomic_read(&ip->i_pincount) == 0);
+-      ASSERT(!xfs_isiflocked(ip));
+-      XFS_STATS_DEC(ip->i_mount, vn_active);
+-
+-      call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
++      __xfs_inode_free(ip);
+ }
+ 
+ /*
+@@ -741,8 +748,7 @@ __xfs_inode_set_reclaim_tag(
+       if (!pag->pag_ici_reclaimable) {
+               /* propagate the reclaim tag up into the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+-              radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+-                              XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
++              radix_tree_tag_set(&ip->i_mount->m_perag_tree, pag->pag_agno,
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+ 
+@@ -786,8 +792,7 @@ __xfs_inode_clear_reclaim(
+       if (!pag->pag_ici_reclaimable) {
+               /* clear the reclaim tag from the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+-              radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+-                              XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
++              radix_tree_tag_clear(&ip->i_mount->m_perag_tree, pag->pag_agno,
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+               trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+@@ -898,6 +903,7 @@ xfs_reclaim_inode(
+       int                     sync_mode)
+ {
+       struct xfs_buf          *bp = NULL;
++      xfs_ino_t               ino = ip->i_ino; /* for radix_tree_delete */
+       int                     error;
+ 
+ restart:
+@@ -962,6 +968,22 @@ restart:
+ 
+       xfs_iflock(ip);
+ reclaim:
++      /*
++       * Because we use RCU freeing we need to ensure the inode always appears
++       * to be reclaimed with an invalid inode number when in the free state.
++       * We do this as early as possible under the ILOCK and flush lock so
++       * that xfs_iflush_cluster() can be guaranteed to detect races with us
++       * here. By doing this, we guarantee that once xfs_iflush_cluster has
++       * locked both the XFS_ILOCK and the flush lock that it will see either
++       * a valid, flushable inode that will serialise correctly against the
++       * locks below, or it will see a clean (and invalid) inode that it can
++       * skip.
++       */
++      spin_lock(&ip->i_flags_lock);
++      ip->i_flags = XFS_IRECLAIM;
++      ip->i_ino = 0;
++      spin_unlock(&ip->i_flags_lock);
++
+       xfs_ifunlock(ip);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 
+@@ -975,7 +997,7 @@ reclaim:
+        */
+       spin_lock(&pag->pag_ici_lock);
+       if (!radix_tree_delete(&pag->pag_ici_root,
+-                              XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
++                              XFS_INO_TO_AGINO(ip->i_mount, ino)))
+               ASSERT(0);
+       __xfs_inode_clear_reclaim(pag, ip);
+       spin_unlock(&pag->pag_ici_lock);
+@@ -992,7 +1014,7 @@ reclaim:
+       xfs_qm_dqdetach(ip);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 
+-      xfs_inode_free(ip);
++      __xfs_inode_free(ip);
+       return error;
+ 
+ out_ifunlock:
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index c738a52..658eea8 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -3259,6 +3259,19 @@ xfs_iflush_cluster(
+                       continue;
+               }
+ 
++
++              /*
++               * Check the inode number again, just to be certain we are not
++               * racing with freeing in xfs_reclaim_inode(). See the comments
++               * in that function for more information as to why the initial
++               * check is not sufficient.
++               */
++              if (!iq->i_ino) {
++                      xfs_ifunlock(iq);
++                      xfs_iunlock(iq, XFS_ILOCK_SHARED);
++                      continue;
++              }
++
+               /*
+                * arriving here means that this inode can be flushed.  First
+                * re-check that it's dirty before flushing.
 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
 index 478d04e..3d6a86b 100644
 --- a/fs/xfs/xfs_ioctl.c
@@ -132656,7 +133031,7 @@ index 556ec1e..38c19c9 100644
  
  /*
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index a10494a..9f25fd6 100644
+index a10494a..2d7faf1 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -7,7 +7,7 @@
@@ -132929,17 +133304,16 @@ index a10494a..9f25fd6 100644
  {
        return tsk->pid;
  }
-@@ -2289,6 +2397,26 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2289,6 +2397,25 @@ extern u64 sched_clock_cpu(int cpu);
  
  extern void sched_clock_init(void);
  
 +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+static inline void populate_stack(void)
++static inline void populate_stack(void *stack)
 +{
-+      struct task_struct *curtask = current;
 +      int c;
-+      int *ptr = curtask->stack;
-+      int *end = curtask->stack + THREAD_SIZE;
++      int *ptr = stack;
++      int *end = stack + THREAD_SIZE;
 +
 +      while (ptr < end) {
 +              c = *(volatile int *)ptr;
@@ -132948,7 +133322,7 @@ index a10494a..9f25fd6 100644
 +      }
 +}
 +#else
-+static inline void populate_stack(void)
++static inline void populate_stack(void *stack)
 +{
 +}
 +#endif
@@ -132956,7 +133330,7 @@ index a10494a..9f25fd6 100644
  #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  static inline void sched_clock_tick(void)
  {
-@@ -2417,7 +2545,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
+@@ -2417,7 +2544,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
  void yield(void);
  
  union thread_union {
@@ -132966,7 +133340,7 @@ index a10494a..9f25fd6 100644
        unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2450,6 +2580,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2450,6 +2579,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -132974,7 +133348,7 @@ index a10494a..9f25fd6 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                struct pid_namespace *ns);
  
-@@ -2481,7 +2612,7 @@ extern void proc_caches_init(void);
+@@ -2481,7 +2611,7 @@ extern void proc_caches_init(void);
  extern void flush_signals(struct task_struct *);
  extern void ignore_signals(struct task_struct *);
  extern void flush_signal_handlers(struct task_struct *, int force_default);
@@ -132983,7 +133357,7 @@ index a10494a..9f25fd6 100644
  
  static inline int kernel_dequeue_signal(siginfo_t *info)
  {
-@@ -2635,7 +2766,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2635,7 +2765,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -132992,7 +133366,7 @@ index a10494a..9f25fd6 100644
  
  extern int do_execve(struct filename *,
                     const char __user * const __user *,
-@@ -2750,11 +2881,13 @@ static inline int thread_group_empty(struct task_struct *p)
+@@ -2750,11 +2880,13 @@ static inline int thread_group_empty(struct task_struct *p)
   * It must not be nested with write_lock_irq(&tasklist_lock),
   * neither inside nor outside.
   */
@@ -133006,7 +133380,7 @@ index a10494a..9f25fd6 100644
  static inline void task_unlock(struct task_struct *p)
  {
        spin_unlock(&p->alloc_lock);
-@@ -2840,9 +2973,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2840,9 +2972,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  #define task_stack_end_corrupted(task) \
                (*(end_of_stack(task)) != STACK_END_MAGIC)
  
@@ -138371,10 +138745,10 @@ index c112abb..49d919f 100644
                if (wo->wo_flags & __WNOTHREAD)
                        break;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 2e391c7..555531a 100644
+index 2e391c7..4af22a9 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -188,12 +188,54 @@ static void free_thread_info(struct thread_info *ti)
+@@ -188,12 +188,55 @@ static void free_thread_info(struct thread_info *ti)
  void thread_info_cache_init(void)
  {
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
@@ -138405,7 +138779,8 @@ index 2e391c7..555531a 100644
 +      if (ret == NULL) {
 +              free_thread_info(*lowmem_stack);
 +              *lowmem_stack = NULL;
-+      }
++      } else
++              populate_stack(ret);
 +
 +out:
 +      return ret;
@@ -138430,7 +138805,7 @@ index 2e391c7..555531a 100644
  /* SLAB cache for signal_struct structures (tsk->signal) */
  static struct kmem_cache *signal_cachep;
  
-@@ -212,18 +254,22 @@ struct kmem_cache *vm_area_cachep;
+@@ -212,18 +255,22 @@ struct kmem_cache *vm_area_cachep;
  /* SLAB cache for mm_struct structures (tsk->mm) */
  static struct kmem_cache *mm_cachep;
  
@@ -138456,7 +138831,7 @@ index 2e391c7..555531a 100644
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
-@@ -290,7 +336,7 @@ static void set_max_threads(unsigned int max_threads_suggested)
+@@ -290,7 +337,7 @@ static void set_max_threads(unsigned int max_threads_suggested)
  
  #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
  /* Initialized by the architecture: */
@@ -138465,7 +138840,7 @@ index 2e391c7..555531a 100644
  #endif
  
  void __init fork_init(void)
-@@ -335,6 +381,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -335,6 +382,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
  {
        struct task_struct *tsk;
        struct thread_info *ti;
@@ -138473,7 +138848,7 @@ index 2e391c7..555531a 100644
        int node = tsk_fork_get_node(orig);
        int err;
  
-@@ -342,7 +389,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -342,7 +390,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        if (!tsk)
                return NULL;
  
@@ -138482,7 +138857,7 @@ index 2e391c7..555531a 100644
        if (!ti)
                goto free_tsk;
  
-@@ -351,6 +398,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -351,6 +399,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto free_ti;
  
        tsk->stack = ti;
@@ -138492,7 +138867,7 @@ index 2e391c7..555531a 100644
  #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
-@@ -367,7 +417,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -367,7 +418,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        set_task_stack_end_magic(tsk);
  
  #ifdef CONFIG_CC_STACKPROTECTOR
@@ -138501,7 +138876,7 @@ index 2e391c7..555531a 100644
  #endif
  
        /*
-@@ -382,24 +432,90 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -382,24 +433,90 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
  
@@ -138596,7 +138971,7 @@ index 2e391c7..555531a 100644
  
        uprobe_start_dup_mmap();
        down_write(&oldmm->mmap_sem);
-@@ -430,52 +546,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -430,52 +547,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
  
        prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -138653,7 +139028,7 @@ index 2e391c7..555531a 100644
                }
  
                /*
-@@ -507,6 +585,38 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -507,6 +586,38 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (retval)
                        goto out;
        }
@@ -138692,7 +139067,7 @@ index 2e391c7..555531a 100644
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
-@@ -516,14 +626,6 @@ out:
+@@ -516,14 +627,6 @@ out:
        up_write(&oldmm->mmap_sem);
        uprobe_end_dup_mmap();
        return retval;
@@ -138707,7 +139082,7 @@ index 2e391c7..555531a 100644
  }
  
  static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -798,8 +900,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+@@ -798,8 +901,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
                return ERR_PTR(err);
  
        mm = get_task_mm(task);
@@ -138718,7 +139093,7 @@ index 2e391c7..555531a 100644
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
-@@ -1000,13 +1102,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -1000,13 +1103,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
                        spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
@@ -138740,7 +139115,7 @@ index 2e391c7..555531a 100644
        return 0;
  }
  
-@@ -1239,7 +1348,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+@@ -1239,7 +1349,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
   * parts of the process environment (as per the clone
   * flags). The actual kick-off is left to the caller.
   */
@@ -138749,7 +139124,7 @@ index 2e391c7..555531a 100644
                                        unsigned long stack_start,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
-@@ -1310,6 +1419,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1310,6 +1420,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
  #endif
        retval = -EAGAIN;
@@ -138759,7 +139134,7 @@ index 2e391c7..555531a 100644
        if (atomic_read(&p->real_cred->user->processes) >=
                        task_rlimit(p, RLIMIT_NPROC)) {
                if (p->real_cred->user != INIT_USER &&
-@@ -1568,6 +1680,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1568,6 +1681,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cancel_cgroup;
        }
  
@@ -138771,7 +139146,7 @@ index 2e391c7..555531a 100644
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
  
-@@ -1657,6 +1774,8 @@ bad_fork_cleanup_count:
+@@ -1657,6 +1775,8 @@ bad_fork_cleanup_count:
  bad_fork_free:
        free_task(p);
  fork_out:
@@ -138780,7 +139155,7 @@ index 2e391c7..555531a 100644
        return ERR_PTR(retval);
  }
  
-@@ -1719,6 +1838,7 @@ long _do_fork(unsigned long clone_flags,
+@@ -1719,6 +1839,7 @@ long _do_fork(unsigned long clone_flags,
  
        p = copy_process(clone_flags, stack_start, stack_size,
                         child_tidptr, NULL, trace, tls);
@@ -138788,7 +139163,7 @@ index 2e391c7..555531a 100644
        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
-@@ -1735,6 +1855,8 @@ long _do_fork(unsigned long clone_flags,
+@@ -1735,6 +1856,8 @@ long _do_fork(unsigned long clone_flags,
                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);
  
@@ -138797,7 +139172,7 @@ index 2e391c7..555531a 100644
                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
-@@ -1871,7 +1993,7 @@ void __init proc_caches_init(void)
+@@ -1871,7 +1994,7 @@ void __init proc_caches_init(void)
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
                        NULL);
@@ -138806,7 +139181,7 @@ index 2e391c7..555531a 100644
        mmap_init();
        nsproxy_cache_init();
  }
-@@ -1919,7 +2041,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1919,7 +2042,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
                return 0;
  
        /* don't need lock here; in the worst case we'll do useless copy */
@@ -138815,7 +139190,7 @@ index 2e391c7..555531a 100644
                return 0;
  
        *new_fsp = copy_fs_struct(fs);
-@@ -2032,7 +2154,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -2032,7 +2155,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                        fs = current->fs;
                        spin_lock(&fs->lock);
                        current->fs = new_fs;
@@ -138825,7 +139200,7 @@ index 2e391c7..555531a 100644
                                new_fs = NULL;
                        else
                                new_fs = fs;
-@@ -2096,7 +2219,7 @@ int unshare_files(struct files_struct **displaced)
+@@ -2096,7 +2220,7 @@ int unshare_files(struct files_struct **displaced)
  int sysctl_max_threads(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -141676,7 +142051,7 @@ index a5d966c..9c2d28b 100644
  #ifdef CONFIG_RT_GROUP_SCHED
        /*
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 1c1d2a0..a8b297a 100644
+index 1c1d2a0..1b7307c 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -2263,7 +2263,7 @@ void set_numabalancing_state(bool enabled)
@@ -141706,18 +142081,27 @@ index 1c1d2a0..a8b297a 100644
                next = head->next;
                head->next = NULL;
                head = next;
-@@ -2784,8 +2784,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+@@ -2784,8 +2784,9 @@ context_switch(struct rq *rq, struct task_struct *prev,
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
 -      } else
 +      } else {
                switch_mm(oldmm, mm, next);
-+              populate_stack();
 +      }
  
        if (!prev->mm) {
                prev->active_mm = NULL;
+@@ -3109,7 +3110,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
+ static inline void schedule_debug(struct task_struct *prev)
+ {
+ #ifdef CONFIG_SCHED_STACK_END_CHECK
+-      BUG_ON(task_stack_end_corrupted(prev));
++      if (task_stack_end_corrupted(prev))
++              panic("corrupted stack end detected inside scheduler\n");
+ #endif
+ 
+       if (unlikely(in_atomic_preempt_off())) {
 @@ -3609,6 +3611,8 @@ int can_nice(const struct task_struct *p, const int nice)
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = nice_to_rlimit(nice);
@@ -141745,15 +142129,7 @@ index 1c1d2a0..a8b297a 100644
                        /* can't increase priority */
                        if (attr->sched_priority > p->rt_priority &&
                            attr->sched_priority > rlim_rtprio)
-@@ -5285,6 +5291,7 @@ void idle_task_exit(void)
- 
-       if (mm != &init_mm) {
-               switch_mm(mm, &init_mm, current);
-+              populate_stack();
-               finish_arch_post_lock_switch();
-       }
-       mmdrop(mm);
-@@ -5410,7 +5417,7 @@ static void migrate_tasks(struct rq *dead_rq)
+@@ -5410,7 +5416,7 @@ static void migrate_tasks(struct rq *dead_rq)
  
  #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
  
@@ -141762,7 +142138,7 @@ index 1c1d2a0..a8b297a 100644
        {
                .procname       = "sched_domain",
                .mode           = 0555,
-@@ -5427,17 +5434,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -5427,17 +5433,17 @@ static struct ctl_table sd_ctl_root[] = {
        {}
  };
  
@@ -141784,7 +142160,7 @@ index 1c1d2a0..a8b297a 100644
  
        /*
         * In the intermediate directories, both the child directory and
-@@ -5445,22 +5452,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -5445,22 +5451,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
@@ -141816,7 +142192,7 @@ index 1c1d2a0..a8b297a 100644
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
-@@ -5480,7 +5490,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -5480,7 +5489,7 @@ set_table_entry(struct ctl_table *entry,
  static struct ctl_table *
  sd_alloc_ctl_domain_table(struct sched_domain *sd)
  {
@@ -141825,7 +142201,7 @@ index 1c1d2a0..a8b297a 100644
  
        if (table == NULL)
                return NULL;
-@@ -5518,9 +5528,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -5518,9 +5527,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
        return table;
  }
  
@@ -141837,7 +142213,7 @@ index 1c1d2a0..a8b297a 100644
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];
-@@ -5547,11 +5557,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -5547,11 +5556,13 @@ static struct ctl_table_header *sd_sysctl_header;
  static void register_sched_domain_sysctl(void)
  {
        int i, cpu_num = num_possible_cpus();
@@ -141852,7 +142228,7 @@ index 1c1d2a0..a8b297a 100644
  
        if (entry == NULL)
                return;
-@@ -5573,8 +5585,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -5573,8 +5584,12 @@ static void unregister_sched_domain_sysctl(void)
  {
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
@@ -142205,10 +142581,18 @@ index d903c02..c3efd35 100644
        unsigned long flags;
        int ret = 0;
 diff --git a/kernel/smpboot.c b/kernel/smpboot.c
-index d264f59..48b8da3 100644
+index d264f59..fd4da04 100644
 --- a/kernel/smpboot.c
 +++ b/kernel/smpboot.c
-@@ -301,7 +301,7 @@ int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_threa
+@@ -13,6 +13,7 @@
+ #include <linux/percpu.h>
+ #include <linux/kthread.h>
+ #include <linux/smpboot.h>
++#include <asm/pgtable.h>
+ 
+ #include "smpboot.h"
+ 
+@@ -301,7 +302,7 @@ int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_threa
                if (cpumask_test_cpu(cpu, cpumask))
                        smpboot_unpark_thread(plug_thread, cpu);
        }
@@ -142217,7 +142601,7 @@ index d264f59..48b8da3 100644
  out:
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
-@@ -319,7 +319,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
+@@ -319,7 +320,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
  {
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
@@ -142226,6 +142610,16 @@ index d264f59..48b8da3 100644
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
+@@ -359,7 +360,9 @@ int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+               smpboot_unpark_thread(plug_thread, cpu);
+ 
++      pax_open_kernel();
+       cpumask_copy(old, new);
++      pax_close_kernel();
+ 
+       mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
 diff --git a/kernel/softirq.c b/kernel/softirq.c
 index 479e443..4072c49 100644
 --- a/kernel/softirq.c
@@ -148702,9 +149096,18 @@ index 1d11790..1cc6074 100644
                spin_unlock_irqrestore(&zone->lock, flags);
        }
 diff --git a/mm/percpu.c b/mm/percpu.c
-index 998607a..4854f93 100644
+index 998607a..389e6ba 100644
 --- a/mm/percpu.c
 +++ b/mm/percpu.c
+@@ -110,7 +110,7 @@ struct pcpu_chunk {
+       int                     map_used;       /* # of map entries used before the sentry */
+       int                     map_alloc;      /* # of map entries allocated */
+       int                     *map;           /* allocation map */
+-      struct work_struct      map_extend_work;/* async ->map[] extension */
++      struct list_head        map_extend_list;/* on pcpu_map_extend_chunks */
+ 
+       void                    *data;          /* chunk data */
+       int                     first_free;     /* no free below this */
 @@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
  static unsigned int pcpu_high_unit_cpu __read_mostly;
  
@@ -148714,6 +149117,192 @@ index 998607a..4854f93 100644
  EXPORT_SYMBOL_GPL(pcpu_base_addr);
  
 static const int *pcpu_unit_map __read_mostly;                /* cpu -> unit */
+@@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
+ static int pcpu_reserved_chunk_limit;
+ 
+ static DEFINE_SPINLOCK(pcpu_lock);    /* all internal data structures */
+-static DEFINE_MUTEX(pcpu_alloc_mutex);        /* chunk create/destroy, [de]pop */
++static DEFINE_MUTEX(pcpu_alloc_mutex);        /* chunk create/destroy, [de]pop, map ext */
+ 
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+ 
++/* chunks which need their map areas extended, protected by pcpu_lock */
++static LIST_HEAD(pcpu_map_extend_chunks);
++
+ /*
+  * The number of empty populated pages, protected by pcpu_lock.  The
+  * reserved chunk doesn't contribute to the count.
+@@ -393,13 +396,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
+ {
+       int margin, new_alloc;
+ 
++      lockdep_assert_held(&pcpu_lock);
++
+       if (is_atomic) {
+               margin = 3;
+ 
+               if (chunk->map_alloc <
+-                  chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
+-                  pcpu_async_enabled)
+-                      schedule_work(&chunk->map_extend_work);
++                  chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
++                      if (list_empty(&chunk->map_extend_list)) {
++                              list_add_tail(&chunk->map_extend_list,
++                                            &pcpu_map_extend_chunks);
++                              pcpu_schedule_balance_work();
++                      }
++              }
+       } else {
+               margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+       }
+@@ -433,6 +442,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+       size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+       unsigned long flags;
+ 
++      lockdep_assert_held(&pcpu_alloc_mutex);
++
+       new = pcpu_mem_zalloc(new_size);
+       if (!new)
+               return -ENOMEM;
+@@ -465,20 +476,6 @@ out_unlock:
+       return 0;
+ }
+ 
+-static void pcpu_map_extend_workfn(struct work_struct *work)
+-{
+-      struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+-                                              map_extend_work);
+-      int new_alloc;
+-
+-      spin_lock_irq(&pcpu_lock);
+-      new_alloc = pcpu_need_to_extend(chunk, false);
+-      spin_unlock_irq(&pcpu_lock);
+-
+-      if (new_alloc)
+-              pcpu_extend_area_map(chunk, new_alloc);
+-}
+-
+ /**
+  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
+  * @chunk: chunk the candidate area belongs to
+@@ -738,7 +735,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+       chunk->map_used = 1;
+ 
+       INIT_LIST_HEAD(&chunk->list);
+-      INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
++      INIT_LIST_HEAD(&chunk->map_extend_list);
+       chunk->free_size = pcpu_unit_size;
+       chunk->contig_hint = pcpu_unit_size;
+ 
+@@ -893,6 +890,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+               return NULL;
+       }
+ 
++      if (!is_atomic)
++              mutex_lock(&pcpu_alloc_mutex);
++
+       spin_lock_irqsave(&pcpu_lock, flags);
+ 
+       /* serve reserved allocations from the reserved chunk if available */
+@@ -965,12 +965,9 @@ restart:
+       if (is_atomic)
+               goto fail;
+ 
+-      mutex_lock(&pcpu_alloc_mutex);
+-
+       if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+               chunk = pcpu_create_chunk();
+               if (!chunk) {
+-                      mutex_unlock(&pcpu_alloc_mutex);
+                       err = "failed to allocate new chunk";
+                       goto fail;
+               }
+@@ -981,7 +978,6 @@ restart:
+               spin_lock_irqsave(&pcpu_lock, flags);
+       }
+ 
+-      mutex_unlock(&pcpu_alloc_mutex);
+       goto restart;
+ 
+ area_found:
+@@ -991,8 +987,6 @@ area_found:
+       if (!is_atomic) {
+               int page_start, page_end, rs, re;
+ 
+-              mutex_lock(&pcpu_alloc_mutex);
+-
+               page_start = PFN_DOWN(off);
+               page_end = PFN_UP(off + size);
+ 
+@@ -1003,7 +997,6 @@ area_found:
+ 
+                       spin_lock_irqsave(&pcpu_lock, flags);
+                       if (ret) {
+-                              mutex_unlock(&pcpu_alloc_mutex);
+                               pcpu_free_area(chunk, off, &occ_pages);
+                               err = "failed to populate";
+                               goto fail_unlock;
+@@ -1043,6 +1036,8 @@ fail:
+               /* see the flag handling in pcpu_blance_workfn() */
+               pcpu_atomic_alloc_failed = true;
+               pcpu_schedule_balance_work();
++      } else {
++              mutex_unlock(&pcpu_alloc_mutex);
+       }
+       return NULL;
+ }
+@@ -1127,6 +1122,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
+               if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
+                       continue;
+ 
++              list_del_init(&chunk->map_extend_list);
+               list_move(&chunk->list, &to_free);
+       }
+ 
+@@ -1144,6 +1140,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
+               pcpu_destroy_chunk(chunk);
+       }
+ 
++      /* service chunks which requested async area map extension */
++      do {
++              int new_alloc = 0;
++
++              spin_lock_irq(&pcpu_lock);
++
++              chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
++                                      struct pcpu_chunk, map_extend_list);
++              if (chunk) {
++                      list_del_init(&chunk->map_extend_list);
++                      new_alloc = pcpu_need_to_extend(chunk, false);
++              }
++
++              spin_unlock_irq(&pcpu_lock);
++
++              if (new_alloc)
++                      pcpu_extend_area_map(chunk, new_alloc);
++      } while (chunk);
++
+       /*
+        * Ensure there are certain number of free populated pages for
+        * atomic allocs.  Fill up from the most packed so that atomic
+@@ -1642,7 +1657,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+        */
+       schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+       INIT_LIST_HEAD(&schunk->list);
+-      INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
++      INIT_LIST_HEAD(&schunk->map_extend_list);
+       schunk->base_addr = base_addr;
+       schunk->map = smap;
+       schunk->map_alloc = ARRAY_SIZE(smap);
+@@ -1671,7 +1686,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+       if (dyn_size) {
+               dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+               INIT_LIST_HEAD(&dchunk->list);
+-              INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
++              INIT_LIST_HEAD(&dchunk->map_extend_list);
+               dchunk->base_addr = base_addr;
+               dchunk->map = dmap;
+               dchunk->map_alloc = ARRAY_SIZE(dmap);
 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
 index 5d453e5..4043093 100644
 --- a/mm/process_vm_access.c
@@ -148966,7 +149555,7 @@ index 440e2a7..9091191 100644
                return -ENOMEM;
  
 diff --git a/mm/slab.c b/mm/slab.c
-index 621fbcb..272a1f3 100644
+index 621fbcb..9bf872e 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -116,6 +116,7 @@
@@ -149039,7 +149628,16 @@ index 621fbcb..272a1f3 100644
  
                /*
                 * Adjust the object sizes so that we clear
-@@ -3367,6 +3372,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -2123,6 +2128,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+               BUG_ON(flags & SLAB_POISON);
+ #endif
+ 
++      flags = pax_sanitize_slab_flags(flags);
++
+       /*
+        * Check that size is in terms of words.  This is needed to avoid
+        * unaligned accesses for some archs when redzoning is used, and makes
+@@ -3367,6 +3374,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
        struct array_cache *ac = cpu_cache_get(cachep);
  
        check_irq_off();
@@ -149060,7 +149658,7 @@ index 621fbcb..272a1f3 100644
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
  
-@@ -3492,7 +3511,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
+@@ -3492,7 +3513,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
  }
  
@@ -149069,7 +149667,7 @@ index 621fbcb..272a1f3 100644
  {
        return __do_kmalloc_node(size, flags, node, _RET_IP_);
  }
-@@ -3512,7 +3531,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
+@@ -3512,7 +3533,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
   * @flags: the type of memory to allocate (see kmalloc).
   * @caller: function caller for debug tracking of the caller
   */
@@ -149078,7 +149676,7 @@ index 621fbcb..272a1f3 100644
                                          unsigned long caller)
  {
        struct kmem_cache *cachep;
-@@ -3585,6 +3604,7 @@ void kfree(const void *objp)
+@@ -3585,6 +3606,7 @@ void kfree(const void *objp)
  
        if (unlikely(ZERO_OR_NULL_PTR(objp)))
                return;
@@ -149086,7 +149684,7 @@ index 621fbcb..272a1f3 100644
        local_irq_save(flags);
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
-@@ -4004,14 +4024,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4004,14 +4026,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
        }
        /* cpu stats */
        {
@@ -149113,7 +149711,7 @@ index 621fbcb..272a1f3 100644
  #endif
  }
  
-@@ -4219,13 +4247,80 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4219,13 +4249,80 @@ static const struct file_operations proc_slabstats_operations = {
  static int __init slab_proc_init(void)
  {
  #ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -149196,7 +149794,7 @@ index 621fbcb..272a1f3 100644
   * ksize - get the actual amount of memory allocated for a given object
   * @objp: Pointer to the object
 diff --git a/mm/slab.h b/mm/slab.h
-index 2eedace..cd94091 100644
+index 2eedace..7de6f9b 100644
 --- a/mm/slab.h
 +++ b/mm/slab.h
 @@ -22,7 +22,7 @@ struct kmem_cache {
@@ -149208,7 +149806,7 @@ index 2eedace..cd94091 100644
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
  };
-@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
+@@ -66,6 +66,35 @@ extern struct list_head slab_caches;
  /* The slab cache that manages slab cache information */
  extern struct kmem_cache *kmem_cache;
  
@@ -149223,13 +149821,28 @@ index 2eedace..cd94091 100644
 +      PAX_SANITIZE_SLAB_FAST,
 +      PAX_SANITIZE_SLAB_FULL,
 +};
++
 +extern enum pax_sanitize_mode pax_sanitize_slab;
++
++static inline unsigned long pax_sanitize_slab_flags(unsigned long flags)
++{
++      if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++              flags |= SLAB_NO_SANITIZE;
++      else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++              flags &= ~SLAB_NO_SANITIZE;
++      return flags;
++}
++#else
++static inline unsigned long pax_sanitize_slab_flags(unsigned long flags)
++{
++      return flags;
++}
 +#endif
 +
  unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size);
  
-@@ -115,7 +129,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
+@@ -115,7 +144,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
  
  /* Legal flag mask for kmem_cache_create(), for various configurations */
  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
@@ -149239,7 +149852,7 @@ index 2eedace..cd94091 100644
  
  #if defined(CONFIG_DEBUG_SLAB)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-@@ -311,6 +326,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+@@ -311,6 +341,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
                return s;
  
        page = virt_to_head_page(x);
@@ -149250,7 +149863,7 @@ index 2eedace..cd94091 100644
        if (slab_equal_or_root(cachep, s))
                return cachep;
 diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 065b7bd..3c2c410 100644
+index 065b7bd..185af36 100644
 --- a/mm/slab_common.c
 +++ b/mm/slab_common.c
 @@ -25,11 +25,35 @@
@@ -149317,21 +149930,7 @@ index 065b7bd..3c2c410 100644
        list_add(&s->list, &slab_caches);
  out:
        if (err)
-@@ -408,6 +432,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
-        */
-       flags &= CACHE_CREATE_MASK;
- 
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+      if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
-+              flags |= SLAB_NO_SANITIZE;
-+      else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
-+              flags &= ~SLAB_NO_SANITIZE;
-+#endif
-+
-       s = __kmem_cache_alias(name, size, align, flags, ctor);
-       if (s)
-               goto out_unlock;
-@@ -469,7 +500,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
+@@ -469,7 +493,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
                rcu_barrier();
  
        list_for_each_entry_safe(s, s2, release, list) {
@@ -149340,7 +149939,7 @@ index 065b7bd..3c2c410 100644
                sysfs_slab_remove(s);
  #else
                slab_kmem_cache_release(s);
-@@ -713,8 +744,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+@@ -713,8 +737,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
  
        mutex_lock(&slab_mutex);
  
@@ -149350,7 +149949,7 @@ index 065b7bd..3c2c410 100644
                goto out_unlock;
  
        err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
-@@ -780,7 +810,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+@@ -780,7 +803,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                                        name, size, err);
  
@@ -149359,7 +149958,7 @@ index 065b7bd..3c2c410 100644
  }
  
  struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
-@@ -793,7 +823,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+@@ -793,7 +816,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
  
        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
@@ -149368,7 +149967,7 @@ index 065b7bd..3c2c410 100644
        return s;
  }
  
-@@ -805,6 +835,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+@@ -805,6 +828,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  EXPORT_SYMBOL(kmalloc_dma_caches);
  #endif
  
@@ -149380,7 +149979,7 @@ index 065b7bd..3c2c410 100644
  /*
   * Conversion table for small slabs sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
-@@ -869,6 +904,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+@@ -869,6 +897,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
                return kmalloc_dma_caches[index];
  
  #endif
@@ -149394,7 +149993,7 @@ index 065b7bd..3c2c410 100644
        return kmalloc_caches[index];
  }
  
-@@ -961,7 +1003,7 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -961,7 +996,7 @@ void __init create_kmalloc_caches(unsigned long flags)
  
        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[i])
@@ -149403,7 +150002,7 @@ index 065b7bd..3c2c410 100644
  
                /*
                 * Caches that are not of the two-to-the-power-of size.
-@@ -969,9 +1011,9 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -969,9 +1004,9 @@ void __init create_kmalloc_caches(unsigned long flags)
                 * earlier power of two caches
                 */
                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
@@ -149415,7 +150014,7 @@ index 065b7bd..3c2c410 100644
        }
  
        /* Kmalloc array is now usable */
-@@ -992,6 +1034,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -992,6 +1027,23 @@ void __init create_kmalloc_caches(unsigned long flags)
                }
        }
  #endif
@@ -149439,7 +150038,7 @@ index 065b7bd..3c2c410 100644
  }
  #endif /* !CONFIG_SLOB */
  
-@@ -1051,6 +1110,9 @@ static void print_slabinfo_header(struct seq_file *m)
+@@ -1051,6 +1103,9 @@ static void print_slabinfo_header(struct seq_file *m)
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> 
<alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
@@ -149449,7 +150048,7 @@ index 065b7bd..3c2c410 100644
  #endif
        seq_putc(m, '\n');
  }
-@@ -1180,7 +1242,7 @@ static int __init slab_proc_init(void)
+@@ -1180,7 +1235,7 @@ static int __init slab_proc_init(void)
  module_init(slab_proc_init);
  #endif /* CONFIG_SLABINFO */
  
@@ -149459,7 +150058,7 @@ index 065b7bd..3c2c410 100644
  {
        void *ret;
 diff --git a/mm/slob.c b/mm/slob.c
-index 5ec1580..017a002 100644
+index 5ec1580..93f3beb 100644
 --- a/mm/slob.c
 +++ b/mm/slob.c
 @@ -67,6 +67,7 @@
@@ -149641,7 +150240,7 @@ index 5ec1580..017a002 100644
  {
        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
  }
-@@ -491,34 +516,123 @@ void kfree(const void *block)
+@@ -491,39 +516,130 @@ void kfree(const void *block)
                return;
        kmemleak_free(block);
  
@@ -149774,7 +150373,14 @@ index 5ec1580..017a002 100644
  }
  EXPORT_SYMBOL(ksize);
  
-@@ -534,23 +648,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+ {
++      flags = pax_sanitize_slab_flags(flags);
++
+       if (flags & SLAB_DESTROY_BY_RCU) {
+               /* leave room for rcu footer at the end of object */
+               c->size += sizeof(struct slob_rcu);
+@@ -534,23 +650,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
  
  static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
  {
@@ -149810,7 +150416,7 @@ index 5ec1580..017a002 100644
  
        if (b && c->ctor)
                c->ctor(b);
-@@ -566,7 +690,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+@@ -566,7 +692,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  EXPORT_SYMBOL(kmem_cache_alloc);
  
  #ifdef CONFIG_NUMA
@@ -149819,7 +150425,7 @@ index 5ec1580..017a002 100644
  {
        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
  }
-@@ -579,12 +703,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+@@ -579,12 +705,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
  EXPORT_SYMBOL(kmem_cache_alloc_node);
  #endif
  
@@ -149840,7 +150446,7 @@ index 5ec1580..017a002 100644
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
-@@ -592,22 +720,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -592,22 +722,36 @@ static void kmem_rcu_free(struct rcu_head *head)
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
@@ -149882,7 +150488,7 @@ index 5ec1580..017a002 100644
  EXPORT_SYMBOL(kmem_cache_free);
  
 diff --git a/mm/slub.c b/mm/slub.c
-index 2a722e1..1e5e2f8 100644
+index 2a722e1..af44068 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -34,6 +34,7 @@
@@ -149911,16 +150517,17 @@ index 2a722e1..1e5e2f8 100644
               s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
  #ifdef CONFIG_STACKTRACE
        {
-@@ -2767,6 +2768,21 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+@@ -2767,6 +2768,22 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
  
        slab_free_freelist_hook(s, head, tail);
  
 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
 +      if (!(s->flags & SLAB_NO_SANITIZE)) {
++              int offset = s->offset ? 0 : sizeof(void *);
 +              void *x = head;
 +
 +              while (1) {
-+                      memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
++                      memset(x + offset, PAX_MEMORY_SANITIZE_VALUE, s->object_size - offset);
 +                      if (s->ctor)
 +                              s->ctor(x);
 +                      if (x == tail_obj)
@@ -149933,17 +150540,7 @@ index 2a722e1..1e5e2f8 100644
  redo:
        /*
         * Determine the currently cpus per cpu slab.
-@@ -3264,6 +3280,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
-       s->inuse = size;
- 
-       if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+              (!(flags & SLAB_NO_SANITIZE)) ||
-+#endif
-               s->ctor)) {
-               /*
-                * Relocate free pointer after the object if it is not
-@@ -3514,7 +3533,7 @@ static int __init setup_slub_min_objects(char *str)
+@@ -3514,7 +3531,7 @@ static int __init setup_slub_min_objects(char *str)
  
  __setup("slub_min_objects=", setup_slub_min_objects);
  
@@ -149952,7 +150549,7 @@ index 2a722e1..1e5e2f8 100644
  {
        struct kmem_cache *s;
        void *ret;
-@@ -3552,7 +3571,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+@@ -3552,7 +3569,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        return ptr;
  }
  
@@ -149961,7 +150558,7 @@ index 2a722e1..1e5e2f8 100644
  {
        struct kmem_cache *s;
        void *ret;
-@@ -3600,6 +3619,70 @@ static size_t __ksize(const void *object)
+@@ -3600,6 +3617,70 @@ static size_t __ksize(const void *object)
        return slab_ksize(page->slab_cache);
  }
  
@@ -150032,7 +150629,7 @@ index 2a722e1..1e5e2f8 100644
  size_t ksize(const void *object)
  {
        size_t size = __ksize(object);
-@@ -3620,6 +3703,7 @@ void kfree(const void *x)
+@@ -3620,6 +3701,7 @@ void kfree(const void *x)
        if (unlikely(ZERO_OR_NULL_PTR(x)))
                return;
  
@@ -150040,7 +150637,7 @@ index 2a722e1..1e5e2f8 100644
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
-@@ -3937,7 +4021,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -3937,7 +4019,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
  
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
@@ -150049,7 +150646,7 @@ index 2a722e1..1e5e2f8 100644
  
                /*
                 * Adjust the object sizes so that we clear
-@@ -3953,7 +4037,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -3953,7 +4035,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
                }
  
                if (sysfs_slab_alias(s, name)) {
@@ -150058,6 +150655,15 @@ index 2a722e1..1e5e2f8 100644
                        s = NULL;
                }
        }
+@@ -3965,6 +4047,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+ {
+       int err;
+ 
++      flags = pax_sanitize_slab_flags(flags);
++
+       err = kmem_cache_open(s, flags);
+       if (err)
+               return err;
 @@ -4070,7 +4154,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
  }
  #endif
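The slab_free() hunk above starts the wipe past the inline freelist pointer when s->offset is 0 (in that layout the free pointer overlays the object's first word), and the calculate_sizes() hunk that used to relocate the free pointer for sanitized caches is dropped from the patch. A standalone sketch of the adjusted wipe, with the poison byte chosen here only for illustration:

#include <stdio.h>
#include <string.h>

#define PAX_MEMORY_SANITIZE_VALUE 0xfe /* assumed poison byte, for illustration */

/* Model of the adjusted wipe in slab_free(): when the freelist pointer
 * lives at object offset 0 (s->offset == 0), start the memset after it
 * so the freelist is not corrupted; otherwise wipe the whole object. */
static void sanitize_object(unsigned char *obj, size_t object_size, size_t free_ptr_offset)
{
	size_t skip = free_ptr_offset ? 0 : sizeof(void *);

	memset(obj + skip, PAX_MEMORY_SANITIZE_VALUE, object_size - skip);
}

int main(void)
{
	unsigned char obj[32] = { 0 };

	sanitize_object(obj, sizeof(obj), 0);
	printf("obj[0]=%#x obj[%zu]=%#x\n", obj[0], sizeof(void *), obj[sizeof(void *)]);
	return 0;
}

Wiping around the pointer keeps the cache layout unchanged while still clearing the payload bytes on free.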
@@ -165468,12 +166074,13 @@ index 0000000..e31e92f
 +}
 diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
 new file mode 100644
-index 0000000..f08a221
+index 0000000..be3978c
 --- /dev/null
 +++ b/scripts/gcc-plugins/latent_entropy_plugin.c
-@@ -0,0 +1,438 @@
+@@ -0,0 +1,613 @@
 +/*
 + * Copyright 2012-2016 by the PaX Team <[email protected]>
++ * Copyright 2016 by Emese Revfy <[email protected]>
 + * Licensed under the GPL v2
 + *
 + * Note: the choice of the license means that the compilation process is
@@ -165481,32 +166088,89 @@ index 0000000..f08a221
 + *       but for the kernel it doesn't matter since it doesn't link against
 + *       any of the gcc libraries
 + *
-+ * gcc plugin to help generate a little bit of entropy from program state,
-+ * used throughout the uptime of the kernel
++ * This gcc plugin helps generate a little bit of entropy from program state,
++ * used throughout the uptime of the kernel. Here is an instrumentation example:
++ *
++ * before:
++ * void __latent_entropy test(int argc, char *argv[])
++ * {
++ *    printf("%u %s\n", argc, *argv);
++ * }
++ *
++ * after:
++ * void __latent_entropy test(int argc, char *argv[])
++ * {
++ *    // latent_entropy_execute() 1.
++ *    unsigned long local_entropy;
++ *    // init_local_entropy() 1.
++ *    void *local_entropy_frame_addr;
++ *    // init_local_entropy() 3.
++ *    unsigned long temp_latent_entropy;
++ *
++ *    // init_local_entropy() 2.
++ *    local_entropy_frame_addr = __builtin_frame_address(0);
++ *    local_entropy = (unsigned long) local_entropy_frame_addr;
++ *
++ *    // init_local_entropy() 4.
++ *    temp_latent_entropy = latent_entropy;
++ *    // init_local_entropy() 5.
++ *    local_entropy ^= temp_latent_entropy;
++ *
++ *    // latent_entropy_execute() 3.
++ *    local_entropy += 4623067384293424948;
++ *
++ *    printf("%u %s\n", argc, *argv);
++ *
++ *    // latent_entropy_execute() 4.
++ *    temp_latent_entropy = rol(temp_latent_entropy, local_entropy);
++ *    latent_entropy = temp_latent_entropy;
++ * }
++ *
++ * It would look like this in C:
++ *
++ * unsigned long local_entropy = latent_entropy;
++ * local_entropy ^= 1234567890;
++ * local_entropy ^= (unsigned long)__builtin_frame_address(0);
++ * local_entropy += 9876543210;
++ * latent_entropy = rol(local_entropy, 6);
 + *
 + * TODO:
 + * - add ipa pass to identify not explicitly marked candidate functions
-+ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - mix in more program state (function arguments/return values,
++ *   loop variables, etc)
 + * - more instrumentation control via attribute parameters
 + *
 + * BUGS:
 + * - none known
++ *
++ * Options:
++ * -fplugin-arg-latent_entropy_plugin-disable
++ *
++ * Attribute: __attribute__((latent_entropy))
++ *  The latent_entropy gcc attribute can be only on functions and variables.
++ *  If it is on a function then the plugin will instrument it. If the attribute
++ *  is on a variable then the plugin will initialize it with a random value.
++ *  The variable must be an integer, an integer array type or a structure
++ *  with integer fields.
 + */
 +
 +#include "gcc-common.h"
 +
 +int plugin_is_GPL_compatible;
 +
-+static bool enabled = true;
-+
 +static GTY(()) tree latent_entropy_decl;
 +
 +static struct plugin_info latent_entropy_plugin_info = {
-+      .version        = "201605212030",
++      .version        = "201606141920",
 +      .help           = "disable\tturn off latent entropy instrumentation\n",
 +};
 +
 +static unsigned HOST_WIDE_INT seed;
++/*
++ * get_random_seed() (this is a GCC function) generates the seed.
++ * This is a simple random generator without any cryptographic security because
++ * the entropy doesn't come from here.
++ */
 +static unsigned HOST_WIDE_INT get_random_const(void)
 +{
 +      unsigned int i;
@@ -165522,10 +166186,21 @@ index 0000000..f08a221
 +      return ret;
 +}
 +
-+static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++static tree tree_get_random_const(tree type)
 +{
-+      tree type;
 +      unsigned long long mask;
++
++      mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
++      mask = 2 * (mask - 1) + 1;
++
++      if (TYPE_UNSIGNED(type))
++              return build_int_cstu(type, mask & get_random_const());
++      return build_int_cst(type, mask & get_random_const());
++}
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args __unused, int flags __unused, bool *no_add_attrs)
++{
++      tree type;
 +#if BUILDING_GCC_VERSION <= 4007
 +      VEC(constructor_elt, gc) *vals;
 +#else
@@ -165555,8 +166230,9 @@ index 0000000..f08a221
 +              switch (TREE_CODE(type)) {
 +              default:
 +                      *no_add_attrs = true;
-+                      error("variable %qD with %qE attribute must be an 
integer or a fixed length integer array type"
-+                              "or a fixed sized structure with integer 
fields", *node, name);
++                      error("variable %qD with %qE attribute must be an 
integer"
++                              " or a fixed length integer array type"
++                              " or a fixed sized structure with integer 
fields", *node, name);
 +                      break;
 +
 +              case RECORD_TYPE: {
@@ -165567,11 +166243,13 @@ index 0000000..f08a221
 +                              tree fieldtype;
 +
 +                              fieldtype = TREE_TYPE(field);
-+                              if (TREE_CODE(fieldtype) != INTEGER_TYPE) {
-+                                      *no_add_attrs = true;
-+                                      error("structure variable %qD with %qE 
attribute has a non-integer field %qE", *node, name, field);
-+                                      break;
-+                              }
++                              if (TREE_CODE(fieldtype) == INTEGER_TYPE)
++                                      continue;
++
++                              *no_add_attrs = true;
++                              error("structure variable %qD with %qE 
attribute has"
++                                      " a non-integer field %qE", *node, 
name, field);
++                              break;
 +                      }
 +
 +                      if (field)
@@ -165584,31 +166262,21 @@ index 0000000..f08a221
 +#endif
 +
 +                      for (field = TYPE_FIELDS(type); field; field = 
TREE_CHAIN(field)) {
-+                              tree fieldtype;
-+
-+                              fieldtype = TREE_TYPE(field);
-+                              mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(fieldtype)) - 1);
-+                              mask = 2 * (mask - 1) + 1;
++                              tree random_const;
 +
-+                              if (TYPE_UNSIGNED(fieldtype))
-+                                      CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cstu(fieldtype, mask & get_random_const()));
-+                              else
-+                                      CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cst(fieldtype, mask & get_random_const()));
++                              random_const = tree_get_random_const(TREE_TYPE(field));
++                              CONSTRUCTOR_APPEND_ELT(vals, field, random_const);
 +                      }
 +
++                      /* Initialize the fields with random constants */
 +                      DECL_INITIAL(*node) = build_constructor(type, vals);
 +//debug_tree(DECL_INITIAL(*node));
 +                      break;
 +              }
 +
++              /* Initialize the variable with a random constant */
 +              case INTEGER_TYPE:
-+                      mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
-+                      mask = 2 * (mask - 1) + 1;
-+
-+                      if (TYPE_UNSIGNED(type))
-+                              DECL_INITIAL(*node) = build_int_cstu(type, mask & get_random_const());
-+                      else
-+                              DECL_INITIAL(*node) = build_int_cst(type, mask & get_random_const());
++                      DECL_INITIAL(*node) = tree_get_random_const(type);
 +                      break;
 +
 +              case ARRAY_TYPE: {
@@ -165621,7 +166289,8 @@ index 0000000..f08a221
 +
 +                      if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size || TREE_CODE(array_size) != INTEGER_CST) {
 +                              *no_add_attrs = true;
-+                              error("array variable %qD with %qE attribute 
must be a fixed length integer array type", *node, name);
++                              error("array variable %qD with %qE attribute 
must be"
++                                      " a fixed length integer array type", 
*node, name);
 +                              break;
 +                      }
 +
@@ -165632,15 +166301,13 @@ index 0000000..f08a221
 +                      vec_alloc(vals, nelt);
 +#endif
 +
-+                      mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(elt_type)) - 1);
-+                      mask = 2 * (mask - 1) + 1;
++                      for (i = 0; i < nelt; i++) {
++                              tree random_const = tree_get_random_const(elt_type);
 +
-+                      for (i = 0; i < nelt; i++)
-+                              if (TYPE_UNSIGNED(elt_type))
-+                                      CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cstu(elt_type, mask & get_random_const()));
-+                              else
-+                                      CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cst(elt_type, mask & get_random_const()));
++                              CONSTRUCTOR_APPEND_ELT(vals, size_int(i), random_const);
++                      }
 +
++                      /* Initialize the elements of the array with random constants */
 +                      DECL_INITIAL(*node) = build_constructor(type, vals);
 +//debug_tree(DECL_INITIAL(*node));
 +                      break;
@@ -165668,7 +166335,7 @@ index 0000000..f08a221
 +#endif
 +};
 +
-+static void register_attributes(void *event_data, void *data)
++static void register_attributes(void *event_data __unused, void *data __unused)
 +{
 +      register_attribute(&latent_entropy_attr);
 +}
@@ -165686,6 +166353,28 @@ index 0000000..f08a221
 +      return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE;
 +}
 +
++static tree create_a_tmp_var(tree type, const char *name)
++{
++      tree var;
++
++      var = create_tmp_var(type, name);
++      add_referenced_var(var);
++      mark_sym_for_renaming(var);
++      return var;
++}
++
++/*
++ * Set up the next operation and its constant operand to use in the latent
++ * entropy PRNG. When RHS is specified, the request is for perturbing the
++ * local latent entropy variable, otherwise it is for perturbing the global
++ * latent entropy variable where the two operands are already given by the
++ * local and global latent entropy variables themselves.
++ *
++ * The operation is one of add/xor/rol when instrumenting the local entropy
++ * variable and one of add/xor when perturbing the global entropy variable.
++ * Rotation is not used for the latter case because it would transmit less
++ * entropy to the global variable than the other two operations.
++ */
 +static enum tree_code get_op(tree *rhs)
 +{
 +      static enum tree_code op;
@@ -165701,6 +166390,10 @@ index 0000000..f08a221
 +      case PLUS_EXPR:
 +              if (rhs) {
 +                      op = LROTATE_EXPR;
++                      /*
++                       * This code limits the value of random_const to
++                       * the size of a wide int for the rotation
++                       */
 +                      random_const &= HOST_BITS_PER_WIDE_INT - 1;
 +                      break;
 +              }
@@ -165719,85 +166412,174 @@ index 0000000..f08a221
 +{
 +      gimple_stmt_iterator gsi;
 +      gimple assign;
-+      tree addxorrol, rhs;
++      tree rhs;
 +      enum tree_code op;
 +
 +      op = get_op(&rhs);
-+      addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
-+      assign = gimple_build_assign(local_entropy, addxorrol);
++      assign = gimple_build_assign_with_ops(op, local_entropy, local_entropy, rhs);
 +      gsi = gsi_after_labels(bb);
 +      gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
 +      update_stmt(assign);
 +//debug_bb(bb);
 +}
 +
-+static void perturb_latent_entropy(basic_block bb, tree rhs)
++static void __perturb_latent_entropy(gimple_stmt_iterator *gsi, tree local_entropy)
 +{
-+      gimple_stmt_iterator gsi;
 +      gimple assign;
-+      tree addxorrol, temp;
++      tree temp;
++      enum tree_code op;
 +
 +      /* 1. create temporary copy of latent_entropy */
-+      temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
-+      add_referenced_var(temp);
++      temp = create_a_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
 +
 +      /* 2. read... */
-+      temp = make_ssa_name(temp, NULL);
-+      assign = gimple_build_assign(temp, latent_entropy_decl);
-+      SSA_NAME_DEF_STMT(temp) = assign;
 +      add_referenced_var(latent_entropy_decl);
-+      gsi = gsi_after_labels(bb);
-+      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      mark_sym_for_renaming(latent_entropy_decl);
++      assign = gimple_build_assign(temp, latent_entropy_decl);
++      gsi_insert_before(gsi, assign, GSI_NEW_STMT);
 +      update_stmt(assign);
 +
 +      /* 3. ...modify... */
-+      addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
-+      temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
-+      assign = gimple_build_assign(temp, addxorrol);
-+      SSA_NAME_DEF_STMT(temp) = assign;
-+      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      op = get_op(NULL);
++      assign = gimple_build_assign_with_ops(op, temp, temp, local_entropy);
++      gsi_insert_after(gsi, assign, GSI_NEW_STMT);
 +      update_stmt(assign);
 +
 +      /* 4. ...write latent_entropy */
 +      assign = gimple_build_assign(latent_entropy_decl, temp);
++      gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++}
++
++static bool handle_tail_calls(basic_block bb, tree local_entropy)
++{
++      gimple_stmt_iterator gsi;
++
++      for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++              gcall *call;
++              gimple stmt = gsi_stmt(gsi);
++
++              if (!is_gimple_call(stmt))
++                      continue;
++
++              call = as_a_gcall(stmt);
++              if (!gimple_call_tail_p(call))
++                      continue;
++
++              __perturb_latent_entropy(&gsi, local_entropy);
++              return true;
++      }
++
++      return false;
++}
++
++static void perturb_latent_entropy(tree local_entropy)
++{
++      edge_iterator ei;
++      edge e, last_bb_e;
++      basic_block last_bb;
++
++      gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
++      last_bb_e = single_pred_edge(EXIT_BLOCK_PTR_FOR_FN(cfun));
++
++      FOR_EACH_EDGE(e, ei, last_bb_e->src->preds) {
++              if (ENTRY_BLOCK_PTR_FOR_FN(cfun) == e->src)
++                      continue;
++              if (EXIT_BLOCK_PTR_FOR_FN(cfun) == e->src)
++                      continue;
++
++              handle_tail_calls(e->src, local_entropy);
++      }
++
++      last_bb = single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun));
++      if (!handle_tail_calls(last_bb, local_entropy)) {
++              gimple_stmt_iterator gsi = gsi_last_bb(last_bb);
++
++              __perturb_latent_entropy(&gsi, local_entropy);
++      }
++//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)));
++}
++
++static void init_local_entropy(basic_block bb, tree local_entropy)
++{
++      gimple assign, call;
++      tree frame_addr, rand_const, temp, fndecl, udi_frame_addr;
++      enum tree_code op;
++      gimple_stmt_iterator gsi = gsi_after_labels(bb);
++
++      /* 1. create local_entropy_frame_addr */
++      frame_addr = create_a_tmp_var(ptr_type_node, "local_entropy_frame_addr");
++
++      /* 2. local_entropy_frame_addr = __builtin_frame_address() */
++      fndecl = builtin_decl_implicit(BUILT_IN_FRAME_ADDRESS);
++      call = gimple_build_call(fndecl, 1, integer_zero_node);
++      gimple_call_set_lhs(call, frame_addr);
++      gsi_insert_before(&gsi, call, GSI_NEW_STMT);
++      update_stmt(call);
++
++      udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
++      assign = gimple_build_assign(local_entropy, udi_frame_addr);
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++
++      /* 3. create temporary copy of latent_entropy */
++      temp = create_a_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
++
++      /* 4. read the global entropy variable into local entropy */
++      add_referenced_var(latent_entropy_decl);
++      mark_sym_for_renaming(latent_entropy_decl);
++      assign = gimple_build_assign(temp, latent_entropy_decl);
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++
++      /* 5. mix local_entropy_frame_addr into local entropy */
++      assign = gimple_build_assign_with_ops(BIT_XOR_EXPR, local_entropy, local_entropy, temp);
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++
++      rand_const = build_int_cstu(unsigned_intDI_type_node, get_random_const());
++      op = get_op(NULL);
++      assign = gimple_build_assign_with_ops(op, local_entropy, local_entropy, rand_const);
 +      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
 +      update_stmt(assign);
++//debug_bb(bb);
++}
++
++static bool create_latent_entropy_decl(void)
++{
++      varpool_node_ptr node;
++
++      if (latent_entropy_decl != NULL_TREE)
++              return true;
++
++      FOR_EACH_VARIABLE(node) {
++              tree var = NODE_DECL(node);
++
++              if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
++                      continue;
++              if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++                      continue;
++
++              latent_entropy_decl = var;
++//            debug_tree(var);
++              break;
++      }
++
++      return latent_entropy_decl != NULL_TREE;
 +}
 +
 +static unsigned int latent_entropy_execute(void)
 +{
 +      basic_block bb;
-+      gimple assign;
-+      gimple_stmt_iterator gsi;
 +      tree local_entropy;
 +
-+      if (!latent_entropy_decl) {
-+              varpool_node_ptr node;
-+
-+              FOR_EACH_VARIABLE(node) {
-+                      tree var = NODE_DECL(node);
-+
-+                      if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
-+                              continue;
-+                      if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
-+                              continue;
-+                      latent_entropy_decl = var;
-+//                    debug_tree(var);
-+                      break;
-+              }
-+              if (!latent_entropy_decl) {
-+//                    debug_tree(current_function_decl);
-+                      return 0;
-+              }
++      if (!create_latent_entropy_decl()) {
++//            debug_tree(current_function_decl);
++              return 0;
 +      }
 +
 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
 +
-+      /* 1. create local entropy variable */
-+      local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
-+      add_referenced_var(local_entropy);
-+      mark_sym_for_renaming(local_entropy);
-+
 +      /* 2. initialize local entropy variable */
 +      gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
 +      bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
@@ -165807,13 +166589,13 @@ index 0000000..f08a221
 +              gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
 +              bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
 +      }
-+      gsi = gsi_after_labels(bb);
 +
-+      assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
-+//    gimple_set_location(assign, loc);
-+      gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+      update_stmt(assign);
-+//debug_bb(bb);
++      /* 1. create the local entropy variable */
++      local_entropy = create_a_tmp_var(unsigned_intDI_type_node, "local_entropy");
++
++      /* 2. initialize the local entropy variable */
++      init_local_entropy(bb, local_entropy);
++
 +      bb = bb->next_bb;
 +
 +      /* 3. instrument each BB with an operation on the local entropy variable */
@@ -165824,13 +166606,11 @@ index 0000000..f08a221
 +      };
 +
 +      /* 4. mix local entropy into the global entropy variable */
-+      gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
-+      perturb_latent_entropy(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)), local_entropy);
-+//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)));
++      perturb_latent_entropy(local_entropy);
 +      return 0;
 +}
 +
-+static void latent_entropy_start_unit(void *gcc_data, void *user_data)
++static void latent_entropy_start_unit(void *gcc_data __unused, void *user_data __unused)
 +{
 +      tree latent_entropy_type;
 +
@@ -165865,6 +166645,7 @@ index 0000000..f08a221
 +
 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
++      bool enabled = true;
 +      const char * const plugin_name = plugin_info->base_name;
 +      const int argc = plugin_info->argc;
 +      const struct plugin_argument * const argv = plugin_info->argv;
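Two details of the rewritten latent_entropy plugin are easy to miss: get_op() masks the rotation constant with HOST_BITS_PER_WIDE_INT - 1 so it stays a valid rotate count, and the new tree_get_random_const() helper builds an all-ones mask for an N-bit integer type as 2 * ((1ULL << (N - 1)) - 1) + 1, which avoids the undefined shift 1ULL << 64 for 64-bit types. A self-contained sketch of that mask computation:

#include <stdio.h>

/* Model of the mask computation in tree_get_random_const() above: for an
 * N-bit integer type, 1ULL << (N - 1) followed by 2 * (mask - 1) + 1
 * yields N low bits set without ever evaluating 1ULL << N. */
static unsigned long long type_mask(unsigned int bits)
{
	unsigned long long mask = 1ULL << (bits - 1);

	return 2 * (mask - 1) + 1;
}

int main(void)
{
	printf("16 -> %#llx\n", type_mask(16)); /* 0xffff */
	printf("32 -> %#llx\n", type_mask(32)); /* 0xffffffff */
	printf("64 -> %#llx\n", type_mask(64)); /* 0xffffffffffffffff */
	return 0;
}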
@@ -168186,10 +168967,10 @@ index 0000000..f74d85a
 +targets += size_overflow_hash.h size_overflow_hash_aux.h disable_size_overflow_hash.h
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/disable_size_overflow_hash.data b/scripts/gcc-plugins/size_overflow_plugin/disable_size_overflow_hash.data
 new file mode 100644
-index 0000000..2a420f3
+index 0000000..e0a04a1
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/disable_size_overflow_hash.data
-@@ -0,0 +1,12444 @@
+@@ -0,0 +1,12445 @@
 +disable_so_interrupt_pnode_gru_message_queue_desc_4 interrupt_pnode gru_message_queue_desc 0 4 NULL
 +disable_so_bch_btree_insert_fndecl_12 bch_btree_insert fndecl 0 12 NULL
 +disable_so_macvlan_sync_address_fndecl_22 macvlan_sync_address fndecl 0 22 NULL nohasharray
@@ -180634,6 +181415,7 @@ index 0000000..2a420f3
 +enable_so_inofree_iagctl_5194 inofree iagctl 0 5194 NULL
 +enable_so_inofreefwd_iag_4921 inofreefwd iag 0 4921 NULL
 +enable_so_iagnum_iag_23227 iagnum iag 0 23227 NULL
++enable_so_offset_lv_35617 offset lv 0 35617 NULL
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/generate_size_overflow_hash.sh b/scripts/gcc-plugins/size_overflow_plugin/generate_size_overflow_hash.sh
 new file mode 100644
 index 0000000..be9724d
@@ -180745,13 +181527,13 @@ index 0000000..be9724d
 +exit 0
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/insert_size_overflow_asm.c b/scripts/gcc-plugins/size_overflow_plugin/insert_size_overflow_asm.c
 new file mode 100644
-index 0000000..ee987da
+index 0000000..cef2817
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/insert_size_overflow_asm.c
 @@ -0,0 +1,369 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -181120,13 +181902,13 @@ index 0000000..ee987da
 +#include "gcc-generate-gimple-pass.h"
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/intentional_overflow.c b/scripts/gcc-plugins/size_overflow_plugin/intentional_overflow.c
 new file mode 100644
-index 0000000..f29aac6
+index 0000000..c40bc7c
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/intentional_overflow.c
 @@ -0,0 +1,1166 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -182292,13 +183074,13 @@ index 0000000..f29aac6
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/remove_unnecessary_dup.c b/scripts/gcc-plugins/size_overflow_plugin/remove_unnecessary_dup.c
 new file mode 100644
-index 0000000..c910983
+index 0000000..5ea5f35
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/remove_unnecessary_dup.c
 @@ -0,0 +1,137 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -182772,13 +183554,13 @@ index 0000000..4bd2e7f
 +#endif
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_debug.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_debug.c
 new file mode 100644
-index 0000000..4098952
+index 0000000..00c7430
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_debug.c
 @@ -0,0 +1,194 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -182972,10 +183754,10 @@ index 0000000..4098952
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
 new file mode 100644
-index 0000000..cbb8a80
+index 0000000..4ed1988
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,21645 @@
+@@ -0,0 +1,21644 @@
 +enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
 +enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
 +enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL
@@ -194807,7 +195589,6 @@ index 0000000..cbb8a80
 +enable_so_blocksize_brcmf_sdio_35612 blocksize brcmf_sdio 0 35612 NULL
 +enable_so_sqp_demux_mlx4_caps_35613 sqp_demux mlx4_caps 0 35613 NULL nohasharray
 +enable_so_maxcontacts_mt_device_35613 maxcontacts mt_device 0 35613 &enable_so_sqp_demux_mlx4_caps_35613
-+enable_so_offset_lv_35617 offset lv 0 35617 NULL
 +enable_so_generic_perform_write_fndecl_35619 generic_perform_write fndecl 0-3 35619 NULL nohasharray
 +enable_so_ext4_update_final_de_fndecl_35619 ext4_update_final_de fndecl 2-3 35619 &enable_so_generic_perform_write_fndecl_35619 nohasharray
 +enable_so_count_fm10k_ring_35619 count fm10k_ring 0 35619 &enable_so_ext4_update_final_de_fndecl_35619
@@ -204623,13 +205404,14 @@ index 0000000..cbb8a80
 +enable_so_connector_write_fndecl_65534 connector_write fndecl 3 65534 NULL
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash_aux.data b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash_aux.data
 new file mode 100644
-index 0000000..17bc0d8
+index 0000000..74e91b2
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash_aux.data
-@@ -0,0 +1,92 @@
+@@ -0,0 +1,97 @@
 +enable_so_spa_set_aux_vdevs_fndecl_746 spa_set_aux_vdevs fndecl 3 746 NULL
 +enable_so_zfs_lookup_fndecl_2144 zfs_lookup fndecl 0 2144 NULL
 +enable_so_mappedread_fndecl_2627 mappedread fndecl 2 2627 NULL
++enable_so_SMACL_Alloc_fndecl_2775 SMACL_Alloc fndecl 1 2775 NULL
 +enable_so_vdev_disk_dio_alloc_fndecl_2957 vdev_disk_dio_alloc fndecl 1 2957 NULL
 +enable_so_nv_alloc_pushpage_spl_fndecl_4286 nv_alloc_pushpage_spl fndecl 2 4286 NULL
 +enable_so_zpl_xattr_get_fndecl_4574 zpl_xattr_get fndecl 0 4574 NULL
@@ -204648,6 +205430,7 @@ index 0000000..17bc0d8
 +enable_so_dmu_snapshot_realname_fndecl_14632 dmu_snapshot_realname fndecl 4 14632 NULL
 +enable_so_kmem_alloc_debug_fndecl_14852 kmem_alloc_debug fndecl 1 14852 NULL
 +enable_so_kmalloc_node_nofail_fndecl_15151 kmalloc_node_nofail fndecl 1 15151 NULL
++enable_so_size_VNet_EventHeader_15382 size VNet_EventHeader 0 15382 NULL
 +enable_so_dmu_write_uio_fndecl_16351 dmu_write_uio fndecl 4 16351 NULL
 +enable_so_zfs_log_write_fndecl_16524 zfs_log_write fndecl 6-5 16524 NULL
 +enable_so_sa_build_layouts_fndecl_16910 sa_build_layouts fndecl 3 16910 NULL
@@ -204676,6 +205459,7 @@ index 0000000..17bc0d8
 +enable_so_zfs_replay_fuids_fndecl_31479 zfs_replay_fuids fndecl 4 31479 NULL
 +enable_so_spa_history_log_to_phys_fndecl_31632 spa_history_log_to_phys fndecl 0-1 31632 NULL
 +enable_so___zpl_xattr_get_fndecl_32601 __zpl_xattr_get fndecl 0 32601 NULL
++enable_so_VNetUserListenerRead_fndecl_34039 VNetUserListenerRead fndecl 4 34039 NULL
 +enable_so_proc_copyout_string_fndecl_34049 proc_copyout_string fndecl 2 34049 NULL
 +enable_so_nv_alloc_sleep_spl_fndecl_34544 nv_alloc_sleep_spl fndecl 2 34544 NULL
 +enable_so_nv_alloc_nosleep_spl_fndecl_34761 nv_alloc_nosleep_spl fndecl 2 34761 NULL
@@ -204709,6 +205493,8 @@ index 0000000..17bc0d8
 +enable_so_zfs_log_write_fndecl_50162 zfs_log_write fndecl 6-5 50162 NULL
 +enable_so_i_fm_alloc_fndecl_51038 i_fm_alloc fndecl 2 51038 NULL
 +enable_so_copyout_fndecl_51409 copyout fndecl 3 51409 NULL
++enable_so_VNetKernel_MemoryAllocate_fndecl_53131 VNetKernel_MemoryAllocate fndecl 1 53131 NULL
++enable_so_VNetUserIfWrite_fndecl_54044 VNetUserIfWrite fndecl 4 54044 NULL
 +enable_so_zvol_log_write_fndecl_54898 zvol_log_write fndecl 4-3 54898 NULL
 +enable_so_zfs_acl_node_alloc_fndecl_55641 zfs_acl_node_alloc fndecl 1 55641 NULL
 +enable_so_get_nvlist_fndecl_56685 get_nvlist fndecl 2 56685 NULL
@@ -204721,13 +205507,13 @@ index 0000000..17bc0d8
 +enable_so_zpios_read_fndecl_64734 zpios_read fndecl 3 64734 NULL
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_ipa.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_ipa.c
 new file mode 100644
-index 0000000..0a679f8
+index 0000000..457ea92
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_ipa.c
 @@ -0,0 +1,1163 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -205890,13 +206676,13 @@ index 0000000..0a679f8
 +#include "gcc-generate-ipa-pass.h"
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_misc.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_misc.c
 new file mode 100644
-index 0000000..7f459ed
+index 0000000..b5291e1
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_misc.c
 @@ -0,0 +1,505 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -206401,13 +207187,13 @@ index 0000000..7f459ed
 +
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c
 new file mode 100644
-index 0000000..3f8f032
+index 0000000..be40980
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c
 @@ -0,0 +1,290 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -206697,13 +207483,13 @@ index 0000000..3f8f032
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin_hash.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin_hash.c
 new file mode 100644
-index 0000000..87af656
+index 0000000..f24bbc0
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin_hash.c
 @@ -0,0 +1,352 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -207055,13 +207841,13 @@ index 0000000..87af656
 +
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform.c
 new file mode 100644
-index 0000000..eebcf4c
+index 0000000..1f5768d
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform.c
 @@ -0,0 +1,743 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
@@ -207804,13 +208590,13 @@ index 0000000..eebcf4c
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform_core.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform_core.c
 new file mode 100644
-index 0000000..062204a
+index 0000000..69e3a85
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_transform_core.c
 @@ -0,0 +1,1025 @@
 +/*
 + * Copyright 2011-2016 by Emese Revfy <[email protected]>
-+ * Licensed under the GPL v2, or (at your option) v3
++ * Licensed under the GPL v2
 + *
 + * Homepage:
 + * https://github.com/ephox-gcc-plugins/size_overflow
