commit:     e93319cc05a90c06b7ee0093f5616c817eb8ecdb
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 17 11:10:17 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Aug 17 11:10:17 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=e93319cc

grsecurity-3.1-4.7.1-201608161813

 {4.7.0 => 4.7.1}/0000_README                       |    6 +-
 4.7.1/1000_linux-4.7.1.patch                       | 1141 ++++++++++++++++++++
 .../4420_grsecurity-3.1-4.7.1-201608161813.patch   |  184 ++--
 {4.7.0 => 4.7.1}/4425_grsec_remove_EI_PAX.patch    |    0
 {4.7.0 => 4.7.1}/4427_force_XATTR_PAX_tmpfs.patch  |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {4.7.0 => 4.7.1}/4435_grsec-mute-warnings.patch    |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {4.7.0 => 4.7.1}/4470_disable-compat_vdso.patch    |    0
 {4.7.0 => 4.7.1}/4475_emutramp_default_on.patch    |    0
 12 files changed, 1227 insertions(+), 104 deletions(-)

diff --git a/4.7.0/0000_README b/4.7.1/0000_README
similarity index 92%
rename from 4.7.0/0000_README
rename to 4.7.1/0000_README
index fb3a8e5..a9a1b4e 100644
--- a/4.7.0/0000_README
+++ b/4.7.1/0000_README
@@ -2,7 +2,11 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.7-201608151842.patch
+Patch: 1000_linux-4.7.1.patch
+From:  http://www.kernel.org
+Desc:  Linux 4.7.1
+
+Patch: 4420_grsecurity-3.1-4.7.1-201608161813.patch
 From:  http://www.grsecurity.net
 Desc:  hardened-sources base patch from upstream grsecurity
 

diff --git a/4.7.1/1000_linux-4.7.1.patch b/4.7.1/1000_linux-4.7.1.patch
new file mode 100644
index 0000000..79c652a
--- /dev/null
+++ b/4.7.1/1000_linux-4.7.1.patch
@@ -0,0 +1,1141 @@
+diff --git a/Documentation/cpu-freq/pcc-cpufreq.txt b/Documentation/cpu-freq/pcc-cpufreq.txt
+index 0a94224..9e3c3b3 100644
+--- a/Documentation/cpu-freq/pcc-cpufreq.txt
++++ b/Documentation/cpu-freq/pcc-cpufreq.txt
+@@ -159,8 +159,8 @@ to be strictly associated with a P-state.
+ 
+ 2.2 cpuinfo_transition_latency:
+ -------------------------------
+-The cpuinfo_transition_latency field is CPUFREQ_ETERNAL. The PCC specification
+-does not include a field to expose this value currently.
++The cpuinfo_transition_latency field is 0. The PCC specification does
++not include a field to expose this value currently.
+ 
+ 2.3 cpuinfo_cur_freq:
+ ---------------------
+diff --git a/Makefile b/Makefile
+index 66da9a3..84335c0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 7
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+ 
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 087acb5..5f221ac 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+       mm_segment_t fs;
+       long ret, err, i;
+ 
+-      if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
++      if (maxevents <= 0 ||
++                      maxevents > (INT_MAX/sizeof(*kbuf)) ||
++                      maxevents > (INT_MAX/sizeof(*events)))
+               return -EINVAL;
++      if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
++              return -EFAULT;
+       kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+       if (!kbuf)
+               return -ENOMEM;
+@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
+ 
+       if (nsops < 1 || nsops > SEMOPM)
+               return -EINVAL;
++      if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
++              return -EFAULT;
+       sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+       if (!sops)
+               return -ENOMEM;
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 9c0b387..51d3988 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -348,7 +348,7 @@ EXPORT(sysn32_call_table)
+       PTR     sys_ni_syscall                  /* available, was setaltroot */
+       PTR     sys_add_key
+       PTR     sys_request_key
+-      PTR     sys_keyctl                      /* 6245 */
++      PTR     compat_sys_keyctl               /* 6245 */
+       PTR     sys_set_thread_area
+       PTR     sys_inotify_init
+       PTR     sys_inotify_add_watch
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index f4f28b1..6efa713 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -504,7 +504,7 @@ EXPORT(sys32_call_table)
+       PTR     sys_ni_syscall                  /* available, was setaltroot */
+       PTR     sys_add_key                     /* 4280 */
+       PTR     sys_request_key
+-      PTR     sys_keyctl
++      PTR     compat_sys_keyctl
+       PTR     sys_set_thread_area
+       PTR     sys_inotify_init
+       PTR     sys_inotify_add_watch           /* 4285 */
+diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
+index 4cddd17..f848572 100644
+--- a/arch/x86/entry/syscalls/syscall_32.tbl
++++ b/arch/x86/entry/syscalls/syscall_32.tbl
+@@ -294,7 +294,7 @@
+ # 285 sys_setaltroot
+ 286   i386    add_key                 sys_add_key
+ 287   i386    request_key             sys_request_key
+-288   i386    keyctl                  sys_keyctl
++288   i386    keyctl                  sys_keyctl                      compat_sys_keyctl
+ 289   i386    ioprio_set              sys_ioprio_set
+ 290   i386    ioprio_get              sys_ioprio_get
+ 291   i386    inotify_init            sys_inotify_init
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 9d3a96c..01c2d14 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -133,13 +133,11 @@ static inline unsigned int x86_cpuid_family(void)
+ #ifdef CONFIG_MICROCODE
+ extern void __init load_ucode_bsp(void);
+ extern void load_ucode_ap(void);
+-extern int __init save_microcode_in_initrd(void);
+ void reload_early_microcode(void);
+ extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+ #else
+ static inline void __init load_ucode_bsp(void)                        { }
+ static inline void load_ucode_ap(void)                                { }
+-static inline int __init save_microcode_in_initrd(void)               { return 0; }
+ static inline void reload_early_microcode(void)                       { }
+ static inline bool
+ get_builtin_firmware(struct cpio_data *cd, const char *name)  { return false; }
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index ac360bf..12823b6 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -175,7 +175,7 @@ void load_ucode_ap(void)
+       }
+ }
+ 
+-int __init save_microcode_in_initrd(void)
++static int __init save_microcode_in_initrd(void)
+ {
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+ 
+@@ -691,4 +691,5 @@ int __init microcode_init(void)
+       return error;
+ 
+ }
++fs_initcall(save_microcode_in_initrd);
+ late_initcall(microcode_init);
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 372aad2..dffd162 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -696,13 +696,6 @@ void free_initmem(void)
+ void __init free_initrd_mem(unsigned long start, unsigned long end)
+ {
+       /*
+-       * Remember, initrd memory may contain microcode or other useful things.
+-       * Before we lose initrd mem, we need to find a place to hold them
+-       * now that normal virtual memory is enabled.
+-       */
+-      save_microcode_in_initrd();
+-
+-      /*
+        * end could be not aligned, and We can not align that,
+        * decompresser could be confused by aligned initrd_end
+        * We already reserve the end partial page before in
+diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
+index 3177c2b..8eee0e9 100644
+--- a/arch/x86/power/hibernate_asm_64.S
++++ b/arch/x86/power/hibernate_asm_64.S
+@@ -24,7 +24,6 @@
+ #include <asm/frame.h>
+ 
+ ENTRY(swsusp_arch_suspend)
+-      FRAME_BEGIN
+       movq    $saved_context, %rax
+       movq    %rsp, pt_regs_sp(%rax)
+       movq    %rbp, pt_regs_bp(%rax)
+@@ -48,6 +47,7 @@ ENTRY(swsusp_arch_suspend)
+       movq    %cr3, %rax
+       movq    %rax, restore_cr3(%rip)
+ 
++      FRAME_BEGIN
+       call swsusp_save
+       FRAME_END
+       ret
+@@ -104,7 +104,6 @@ ENTRY(core_restore_code)
+        /* code below belongs to the image kernel */
+       .align PAGE_SIZE
+ ENTRY(restore_registers)
+-      FRAME_BEGIN
+       /* go back to the original page tables */
+       movq    %r9, %cr3
+ 
+@@ -145,6 +144,5 @@ ENTRY(restore_registers)
+       /* tell the hibernation core that we've just restored the memory */
+       movq    %rax, in_suspend(%rip)
+ 
+-      FRAME_END
+       ret
+ ENDPROC(restore_registers)
+diff --git a/block/genhd.c b/block/genhd.c
+index 9f42526..3eebd25 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
+       if (iter) {
+               class_dev_iter_exit(iter);
+               kfree(iter);
++              seqf->private = NULL;
+       }
+ }
+ 
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index bec329b..d9ea5f9 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
+ 
+       ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+                                   CRYPTO_ALG_TYPE_HASH,
+-                                  CRYPTO_ALG_TYPE_AHASH_MASK);
++                                  CRYPTO_ALG_TYPE_AHASH_MASK |
++                                  crypto_requires_sync(algt->type,
++                                                       algt->mask));
+       if (IS_ERR(ghash_alg))
+               return PTR_ERR(ghash_alg);
+ 
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index ea5815c..bc769c4 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+ 
+ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
+ {
+-      if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
++      if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
++          !(walk->offset & (PAGE_SIZE - 1)))
+               scatterwalk_pagedone(walk, out, more);
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_done);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0158d3b..87ab9f6 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -723,15 +723,18 @@ retry:
+       }
+ }
+ 
+-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
++static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+       const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ 
++      if (nbits < 0)
++              return -EINVAL;
++
+       /* Cap the value to avoid overflows */
+       nbits = min(nbits,  nbits_max);
+-      nbits = max(nbits, -nbits_max);
+ 
+       credit_entropy_bits(r, nbits);
++      return 0;
+ }
+ 
+ /*********************************************************************
+@@ -1543,8 +1546,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+                       return -EPERM;
+               if (get_user(ent_count, p))
+                       return -EFAULT;
+-              credit_entropy_bits_safe(&input_pool, ent_count);
+-              return 0;
++              return credit_entropy_bits_safe(&input_pool, ent_count);
+       case RNDADDENTROPY:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+@@ -1558,8 +1560,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+                                   size);
+               if (retval < 0)
+                       return retval;
+-              credit_entropy_bits_safe(&input_pool, ent_count);
+-              return 0;
++              return credit_entropy_bits_safe(&input_pool, ent_count);
+       case RNDZAPENTCNT:
+       case RNDCLEARPOOL:
+               /*
+diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
+index a7ecb9a..3f0ce2a 100644
+--- a/drivers/cpufreq/pcc-cpufreq.c
++++ b/drivers/cpufreq/pcc-cpufreq.c
+@@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       policy->min = policy->cpuinfo.min_freq =
+               ioread32(&pcch_hdr->minimum_frequency) * 1000;
+ 
+-      policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+-
+       pr_debug("init: policy->max is %d, policy->min is %d\n",
+               policy->max, policy->min);
+ out:
+diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
+index a925fb0..f846fd5 100644
+--- a/drivers/infiniband/hw/hfi1/Kconfig
++++ b/drivers/infiniband/hw/hfi1/Kconfig
+@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
+       depends on X86_64 && INFINIBAND_RDMAVT
+       select MMU_NOTIFIER
+       select CRC32
+-      default m
+       ---help---
+       This is a low-level driver for Intel OPA Gen1 adapter.
+ config HFI1_DEBUG_SDMA_ORDER
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index a2afa3b..4d79819 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1422,7 +1422,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+               return -EINVAL;
+       }
+ 
+-      if (slave_ops->ndo_set_mac_address == NULL) {
++      if (slave_dev->type == ARPHRD_INFINIBAND &&
++          BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
++              netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
++                          slave_dev->type);
++              res = -EOPNOTSUPP;
++              goto err_undo_flags;
++      }
++
++      if (!slave_ops->ndo_set_mac_address ||
++          slave_dev->type == ARPHRD_INFINIBAND) {
+               netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
+               if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+                   bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+index b122f60..03601df 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+@@ -809,13 +809,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+                        * in a bitmap and increasing the chain consumer only
+                        * for the first successive completed entries.
+                        */
+-                      bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
++                      __set_bit(pos, p_spq->p_comp_bitmap);
+ 
+                       while (test_bit(p_spq->comp_bitmap_idx,
+                                       p_spq->p_comp_bitmap)) {
+-                              bitmap_clear(p_spq->p_comp_bitmap,
+-                                           p_spq->comp_bitmap_idx,
+-                                           SPQ_RING_SIZE);
++                              __clear_bit(p_spq->comp_bitmap_idx,
++                                          p_spq->p_comp_bitmap);
+                               p_spq->comp_bitmap_idx++;
+                               qed_chain_return_produced(&p_spq->chain);
+                       }
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 8bcd78f..a70b6c4 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -942,7 +942,6 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
+       }
+ 
+       macsec_skb_cb(skb)->req = req;
+-      macsec_skb_cb(skb)->rx_sa = rx_sa;
+       skb->dev = dev;
+       aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
+ 
+@@ -1169,6 +1168,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+               }
+       }
+ 
++      macsec_skb_cb(skb)->rx_sa = rx_sa;
++
+       /* Disabled && !changed text => skip validation */
+       if (hdr->tci_an & MACSEC_TCI_C ||
+           secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index ce362bd..45b57c2 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -300,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
+ static void mvebu_uart_shutdown(struct uart_port *port)
+ {
+       writel(0, port->membase + UART_CTRL);
++
++      free_irq(port->irq, port);
+ }
+ 
+ static void mvebu_uart_set_termios(struct uart_port *port,
+diff --git a/fs/dcache.c b/fs/dcache.c
+index d6847d7..1ed81bb 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -622,7 +622,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)
+ 
+ failed:
+       spin_unlock(&dentry->d_lock);
+-      cpu_relax();
+       return dentry; /* try again with same dentry */
+ }
+ 
+@@ -796,6 +795,8 @@ void dput(struct dentry *dentry)
+               return;
+ 
+ repeat:
++      might_sleep();
++
+       rcu_read_lock();
+       if (likely(fast_dput(dentry))) {
+               rcu_read_unlock();
+@@ -829,8 +830,10 @@ repeat:
+ 
+ kill_it:
+       dentry = dentry_kill(dentry);
+-      if (dentry)
++      if (dentry) {
++              cond_resched();
+               goto repeat;
++      }
+ }
+ EXPORT_SYMBOL(dput);
+ 
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 3020fd7..1ea5054 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
+       memset(bh->b_data, 0, sb->s_blocksize);
+ 
+       bit_max = ext4_num_base_meta_clusters(sb, block_group);
++      if ((bit_max >> 3) >= bh->b_size)
++              return -EFSCORRUPTED;
++
+       for (bit = 0; bit < bit_max; bit++)
+               ext4_set_bit(bit, bh->b_data);
+ 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2a2eef9..d7ccb7f 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -381,9 +381,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+       ext4_fsblk_t block = ext4_ext_pblock(ext);
+       int len = ext4_ext_get_actual_len(ext);
+       ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+-      ext4_lblk_t last = lblock + len - 1;
+ 
+-      if (len == 0 || lblock > last)
++      /*
++       * We allow neither:
++       *  - zero length
++       *  - overflow/wrap-around
++       */
++      if (lblock + len <= lblock)
+               return 0;
+       return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -474,6 +478,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+               error_msg = "invalid extent entries";
+               goto corrupted;
+       }
++      if (unlikely(depth > 32)) {
++              error_msg = "too large eh_depth";
++              goto corrupted;
++      }
+       /* Verify checksum on non-root extent tree nodes */
+       if (ext_depth(inode) != depth &&
+           !ext4_extent_block_csum_verify(inode, eh)) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f7140ca..b747ec0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -205,9 +205,9 @@ void ext4_evict_inode(struct inode *inode)
+                * Note that directories do not have this problem because they
+                * don't use page cache.
+                */
+-              if (ext4_should_journal_data(inode) &&
+-                  (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
+-                  inode->i_ino != EXT4_JOURNAL_INO) {
++              if (inode->i_ino != EXT4_JOURNAL_INO &&
++                  ext4_should_journal_data(inode) &&
++                  (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+                       journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+                       tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+ 
+@@ -2748,13 +2748,36 @@ retry:
+                               done = true;
+                       }
+               }
+-              ext4_journal_stop(handle);
++              /*
++               * Caution: If the handle is synchronous,
++               * ext4_journal_stop() can wait for transaction commit
++               * to finish which may depend on writeback of pages to
++               * complete or on page lock to be released.  In that
++               * case, we have to wait until after after we have
++               * submitted all the IO, released page locks we hold,
++               * and dropped io_end reference (for extent conversion
++               * to be able to complete) before stopping the handle.
++               */
++              if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
++                      ext4_journal_stop(handle);
++                      handle = NULL;
++              }
+               /* Submit prepared bio */
+               ext4_io_submit(&mpd.io_submit);
+               /* Unlock pages we didn't use */
+               mpage_release_unused_pages(&mpd, give_up_on_write);
+-              /* Drop our io_end reference we got from init */
+-              ext4_put_io_end(mpd.io_submit.io_end);
++              /*
++               * Drop our io_end reference we got from init. We have
++               * to be careful and use deferred io_end finishing if
++               * we are still holding the transaction as we can
++               * release the last reference to io_end which may end
++               * up doing unwritten extent conversion.
++               */
++              if (handle) {
++                      ext4_put_io_end_defer(mpd.io_submit.io_end);
++                      ext4_journal_stop(handle);
++              } else
++                      ext4_put_io_end(mpd.io_submit.io_end);
+ 
+               if (ret == -ENOSPC && sbi->s_journal) {
+                       /*
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index c1ab3ec..7f42eda 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2939,7 +2939,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+               ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
+                          "fs metadata", block, block+len);
+               /* File system mounted not to panic on error
+-               * Fix the bitmap and repeat the block allocation
++               * Fix the bitmap and return EFSCORRUPTED
+                * We leak some of the blocks here.
+                */
+               ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+@@ -2948,7 +2948,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+               ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+               err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+               if (!err)
+-                      err = -EAGAIN;
++                      err = -EFSCORRUPTED;
+               goto out_err;
+       }
+ 
+@@ -4513,18 +4513,7 @@ repeat:
+       }
+       if (likely(ac->ac_status == AC_STATUS_FOUND)) {
+               *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
+-              if (*errp == -EAGAIN) {
+-                      /*
+-                       * drop the reference that we took
+-                       * in ext4_mb_use_best_found
+-                       */
+-                      ext4_mb_release_context(ac);
+-                      ac->ac_b_ex.fe_group = 0;
+-                      ac->ac_b_ex.fe_start = 0;
+-                      ac->ac_b_ex.fe_len = 0;
+-                      ac->ac_status = AC_STATUS_CONTINUE;
+-                      goto repeat;
+-              } else if (*errp) {
++              if (*errp) {
+                       ext4_discard_allocated_blocks(ac);
+                       goto errout;
+               } else {
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3822a5a..639bd756 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2278,6 +2278,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+       while (es->s_last_orphan) {
+               struct inode *inode;
+ 
++              /*
++               * We may have encountered an error during cleanup; if
++               * so, skip the rest.
++               */
++              if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
++                      jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
++                      es->s_last_orphan = 0;
++                      break;
++              }
++
+               inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+               if (IS_ERR(inode)) {
+                       es->s_last_orphan = 0;
+@@ -3416,6 +3426,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+               goto failed_mount;
+       }
+ 
++      if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
++              ext4_msg(sb, KERN_ERR,
++                       "Number of reserved GDT blocks insanely large: %d",
++                       le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
++              goto failed_mount;
++      }
++
+       if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
+               err = bdev_dax_supported(sb, blocksize);
+               if (err)
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 9154f86..6cac3dc 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
+       fuse_sync_writes(inode);
+       inode_unlock(inode);
+ 
++      if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
++          test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
++              err = -ENOSPC;
++      if (test_bit(AS_EIO, &file->f_mapping->flags) &&
++          test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
++              err = -EIO;
++      if (err)
++              return err;
++
+       req = fuse_get_req_nofail_nopages(fc, file);
+       memset(&inarg, 0, sizeof(inarg));
+       inarg.fh = ff->fh;
+@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
+               goto out;
+ 
+       fuse_sync_writes(inode);
++
++      /*
++       * Due to implementation of fuse writeback
++       * filemap_write_and_wait_range() does not catch errors.
++       * We have to do this directly after fuse_sync_writes()
++       */
++      if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
++          test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
++              err = -ENOSPC;
++      if (test_bit(AS_EIO, &file->f_mapping->flags) &&
++          test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
++              err = -EIO;
++      if (err)
++              goto out;
++
+       err = sync_inode_metadata(inode, 1);
+       if (err)
+               goto out;
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 9961d843..9b7cb37 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -942,7 +942,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
+       arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
+               FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
+               FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+-              FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
++              FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+               FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
+               FUSE_PARALLEL_DIROPS;
+diff --git a/fs/inode.c b/fs/inode.c
+index 4ccbc21..9ea4219 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
+ void address_space_init_once(struct address_space *mapping)
+ {
+       memset(mapping, 0, sizeof(*mapping));
+-      INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
++      INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
+       spin_lock_init(&mapping->tree_lock);
+       init_rwsem(&mapping->i_mmap_rwsem);
+       INIT_LIST_HEAD(&mapping->private_list);
+@@ -1740,8 +1740,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
+  */
+ int file_remove_privs(struct file *file)
+ {
+-      struct dentry *dentry = file->f_path.dentry;
+-      struct inode *inode = d_inode(dentry);
++      struct dentry *dentry = file_dentry(file);
++      struct inode *inode = file_inode(file);
+       int kill;
+       int error = 0;
+ 
+@@ -1749,7 +1749,7 @@ int file_remove_privs(struct file *file)
+       if (IS_NOSEC(inode))
+               return 0;
+ 
+-      kill = file_needs_remove_privs(file);
++      kill = dentry_needs_remove_privs(dentry);
+       if (kill < 0)
+               return kill;
+       if (kill)
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 116a333..0f56deb 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+               goto out;
+       }
+ 
++      same->dest_count = count;
+       ret = vfs_dedupe_file_range(file, same);
+       if (ret)
+               goto out;
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 1471db9..c6521c2 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+               rcu_read_lock();
+               ipc_lock_object(&msq->q_perm);
+ 
+-              ipc_rcu_putref(msq, ipc_rcu_free);
++              ipc_rcu_putref(msq, msg_rcu_free);
+               /* raced with RMID? */
+               if (!ipc_valid_object(&msq->q_perm)) {
+                       err = -EIDRM;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index b3757ea..5d2f875 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -449,7 +449,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
+ static inline void sem_lock_and_putref(struct sem_array *sma)
+ {
+       sem_lock(sma, NULL, -1);
+-      ipc_rcu_putref(sma, ipc_rcu_free);
++      ipc_rcu_putref(sma, sem_rcu_free);
+ }
+ 
+ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+@@ -1392,7 +1392,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+                       rcu_read_unlock();
+                       sem_io = ipc_alloc(sizeof(ushort)*nsems);
+                       if (sem_io == NULL) {
+-                              ipc_rcu_putref(sma, ipc_rcu_free);
++                              ipc_rcu_putref(sma, sem_rcu_free);
+                               return -ENOMEM;
+                       }
+ 
+@@ -1426,20 +1426,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+               if (nsems > SEMMSL_FAST) {
+                       sem_io = ipc_alloc(sizeof(ushort)*nsems);
+                       if (sem_io == NULL) {
+-                              ipc_rcu_putref(sma, ipc_rcu_free);
++                              ipc_rcu_putref(sma, sem_rcu_free);
+                               return -ENOMEM;
+                       }
+               }
+ 
+               if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
+-                      ipc_rcu_putref(sma, ipc_rcu_free);
++                      ipc_rcu_putref(sma, sem_rcu_free);
+                       err = -EFAULT;
+                       goto out_free;
+               }
+ 
+               for (i = 0; i < nsems; i++) {
+                       if (sem_io[i] > SEMVMX) {
+-                              ipc_rcu_putref(sma, ipc_rcu_free);
++                              ipc_rcu_putref(sma, sem_rcu_free);
+                               err = -ERANGE;
+                               goto out_free;
+                       }
+@@ -1731,7 +1731,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
+       /* step 2: allocate new undo structure */
+       new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+       if (!new) {
+-              ipc_rcu_putref(sma, ipc_rcu_free);
++              ipc_rcu_putref(sma, sem_rcu_free);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 8b7d845..bc7852f 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -274,10 +274,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
+ 
+               /*
+                * Even if the caller has preloaded, try to allocate from the
+-               * cache first for the new node to get accounted.
++               * cache first for the new node to get accounted to the memory
++               * cgroup.
+                */
+               ret = kmem_cache_alloc(radix_tree_node_cachep,
+-                                     gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
++                                     gfp_mask | __GFP_NOWARN);
+               if (ret)
+                       goto out;
+ 
+@@ -300,8 +301,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
+               kmemleak_update_trace(ret);
+               goto out;
+       }
+-      ret = kmem_cache_alloc(radix_tree_node_cachep,
+-                             gfp_mask | __GFP_ACCOUNT);
++      ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ out:
+       BUG_ON(radix_tree_is_internal_node(ret));
+       return ret;
+@@ -348,6 +348,12 @@ static int __radix_tree_preload(gfp_t gfp_mask)
+       struct radix_tree_node *node;
+       int ret = -ENOMEM;
+ 
++      /*
++       * Nodes preloaded by one cgroup can be be used by another cgroup, so
++       * they should never be accounted to any particular memory cgroup.
++       */
++      gfp_mask &= ~__GFP_ACCOUNT;
++
+       preempt_disable();
+       rtp = this_cpu_ptr(&radix_tree_preloads);
+       while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 5339c89..ca847d9 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4083,14 +4083,32 @@ static struct cftype mem_cgroup_legacy_files[] = {
+ 
+ static DEFINE_IDR(mem_cgroup_idr);
+ 
+-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
++static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
+ {
+-      atomic_inc(&memcg->id.ref);
++      atomic_add(n, &memcg->id.ref);
+ }
+ 
+-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
++static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+ {
+-      if (atomic_dec_and_test(&memcg->id.ref)) {
++      while (!atomic_inc_not_zero(&memcg->id.ref)) {
++              /*
++               * The root cgroup cannot be destroyed, so it's refcount must
++               * always be >= 1.
++               */
++              if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
++                      VM_BUG_ON(1);
++                      break;
++              }
++              memcg = parent_mem_cgroup(memcg);
++              if (!memcg)
++                      memcg = root_mem_cgroup;
++      }
++      return memcg;
++}
++
++static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
++{
++      if (atomic_sub_and_test(n, &memcg->id.ref)) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+ 
+@@ -4099,6 +4117,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+       }
+ }
+ 
++static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
++{
++      mem_cgroup_id_get_many(memcg, 1);
++}
++
++static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
++{
++      mem_cgroup_id_put_many(memcg, 1);
++}
++
+ /**
+  * mem_cgroup_from_id - look up a memcg from a memcg id
+  * @id: the memcg id to look up
+@@ -4736,6 +4764,8 @@ static void __mem_cgroup_clear_mc(void)
+               if (!mem_cgroup_is_root(mc.from))
+                       page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
+ 
++              mem_cgroup_id_put_many(mc.from, mc.moved_swap);
++
+               /*
+                * we charged both to->memory and to->memsw, so we
+                * should uncharge to->memory.
+@@ -4743,9 +4773,9 @@ static void __mem_cgroup_clear_mc(void)
+               if (!mem_cgroup_is_root(mc.to))
+                       page_counter_uncharge(&mc.to->memory, mc.moved_swap);
+ 
+-              css_put_many(&mc.from->css, mc.moved_swap);
++              mem_cgroup_id_get_many(mc.to, mc.moved_swap);
++              css_put_many(&mc.to->css, mc.moved_swap);
+ 
+-              /* we've already done css_get(mc.to) */
+               mc.moved_swap = 0;
+       }
+       memcg_oom_recover(from);
+@@ -5805,7 +5835,7 @@ subsys_initcall(mem_cgroup_init);
+  */
+ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ {
+-      struct mem_cgroup *memcg;
++      struct mem_cgroup *memcg, *swap_memcg;
+       unsigned short oldid;
+ 
+       VM_BUG_ON_PAGE(PageLRU(page), page);
+@@ -5820,16 +5850,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+       if (!memcg)
+               return;
+ 
+-      mem_cgroup_id_get(memcg);
+-      oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
++      /*
++       * In case the memcg owning these pages has been offlined and doesn't
++       * have an ID allocated to it anymore, charge the closest online
++       * ancestor for the swap instead and transfer the memory+swap charge.
++       */
++      swap_memcg = mem_cgroup_id_get_online(memcg);
++      oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+-      mem_cgroup_swap_statistics(memcg, true);
++      mem_cgroup_swap_statistics(swap_memcg, true);
+ 
+       page->mem_cgroup = NULL;
+ 
+       if (!mem_cgroup_is_root(memcg))
+               page_counter_uncharge(&memcg->memory, 1);
+ 
++      if (memcg != swap_memcg) {
++              if (!mem_cgroup_is_root(swap_memcg))
++                      page_counter_charge(&swap_memcg->memsw, 1);
++              page_counter_uncharge(&memcg->memsw, 1);
++      }
++
+       /*
+        * Interrupts should be disabled here because the caller holds the
+        * mapping->tree_lock lock which is taken with interrupts-off. It is
+@@ -5868,11 +5909,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+       if (!memcg)
+               return 0;
+ 
++      memcg = mem_cgroup_id_get_online(memcg);
++
+       if (!mem_cgroup_is_root(memcg) &&
+-          !page_counter_try_charge(&memcg->swap, 1, &counter))
++          !page_counter_try_charge(&memcg->swap, 1, &counter)) {
++              mem_cgroup_id_put(memcg);
+               return -ENOMEM;
++      }
+ 
+-      mem_cgroup_id_get(memcg);
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+       mem_cgroup_swap_statistics(memcg, true);
+diff --git a/mm/mempool.c b/mm/mempool.c
+index 8f65464..47a659d 100644
+--- a/mm/mempool.c
++++ b/mm/mempool.c
+@@ -306,7 +306,7 @@ EXPORT_SYMBOL(mempool_resize);
+  * returns NULL. Note that due to preallocation, this function
+  * *never* fails when called from process contexts. (it might
+  * fail if called from an IRQ context.)
+- * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
++ * Note: using __GFP_ZERO is not supported.
+  */
+ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
+ {
+@@ -315,27 +315,16 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
+       wait_queue_t wait;
+       gfp_t gfp_temp;
+ 
+-      /* If oom killed, memory reserves are essential to prevent livelock */
+-      VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
+-      /* No element size to zero on allocation */
+       VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
+-
+       might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+ 
++      gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
+       gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
+       gfp_mask |= __GFP_NOWARN;       /* failures are OK */
+ 
+       gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
+ 
+ repeat_alloc:
+-      if (likely(pool->curr_nr)) {
+-              /*
+-               * Don't allocate from emergency reserves if there are
+-               * elements available.  This check is racy, but it will
+-               * be rechecked each loop.
+-               */
+-              gfp_temp |= __GFP_NOMEMALLOC;
+-      }
+ 
+       element = pool->alloc(gfp_temp, pool->pool_data);
+       if (likely(element != NULL))
+@@ -359,12 +348,11 @@ repeat_alloc:
+        * We use gfp mask w/o direct reclaim or IO for the first round.  If
+        * alloc failed with that and @pool was empty, retry immediately.
+        */
+-      if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
++      if (gfp_temp != gfp_mask) {
+               spin_unlock_irqrestore(&pool->lock, flags);
+               gfp_temp = gfp_mask;
+               goto repeat_alloc;
+       }
+-      gfp_temp = gfp_mask;
+ 
+       /* We must not sleep if !__GFP_DIRECT_RECLAIM */
+       if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 43d2cd8..28d5ec2 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -288,6 +288,14 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
+               case 0x01:      /* IEEE MAC (Pause) */
+                       goto drop;
+ 
++              case 0x0E:      /* 802.1AB LLDP */
++                      fwd_mask |= p->br->group_fwd_mask;
++                      if (fwd_mask & (1u << dest[5]))
++                              goto forward;
++                      *pskb = skb;
++                      __br_handle_local_finish(skb);
++                      return RX_HANDLER_PASS;
++
+               default:
+                       /* Allow selective forwarding for most other protocols */
+                       fwd_mask |= p->br->group_fwd_mask;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e00e972..700b72c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -236,7 +236,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
+               /* Set window scaling on max possible window
+                * See RFC1323 for an explanation of the limit to 14
+                */
+-              space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
++              space = max_t(u32, space, sysctl_tcp_rmem[2]);
++              space = max_t(u32, space, sysctl_rmem_max);
+               space = min_t(u32, space, *window_clamp);
+               while (space > 65535 && (*rcv_wscale) < 14) {
+                       space >>= 1;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 4aed8fc..e61f7cd 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1581,9 +1581,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+           udp_lib_checksum_complete(skb))
+                       goto csum_error;
+ 
+-      if (sk_filter(sk, skb))
+-              goto drop;
+-      if (unlikely(skb->len < sizeof(struct udphdr)))
++      if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
+               goto drop;
+ 
+       udp_csum_pull_header(skb);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 47f837a..047c75a 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3562,6 +3562,10 @@ restart:
+               if (state != INET6_IFADDR_STATE_DEAD) {
+                       __ipv6_ifa_notify(RTM_DELADDR, ifa);
+                       inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
++              } else {
++                      if (idev->cnf.forwarding)
++                              addrconf_leave_anycast(ifa);
++                      addrconf_leave_solict(ifa->idev, &ifa->addr);
+               }
+ 
+               write_lock_bh(&idev->lock);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index acc09705..42a2edf 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -618,9 +618,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+           udp_lib_checksum_complete(skb))
+               goto csum_error;
+ 
+-      if (sk_filter(sk, skb))
+-              goto drop;
+-      if (unlikely(skb->len < sizeof(struct udphdr)))
++      if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
+               goto drop;
+ 
+       udp_csum_pull_header(skb);
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 923abd6..8d2f7c9 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
+       }
+ 
+       /* Check if we have opened a local TSAP */
+-      if (!self->tsap)
+-              irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++      if (!self->tsap) {
++              err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++              if (err)
++                      goto out;
++      }
+ 
+       /* Move to connecting socket, start sending Connect Requests */
+       sock->state = SS_CONNECTING;
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 47cf460..f093322 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -328,6 +328,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+                */
+ 
+               sk = rcvr->sk;
++              local_bh_disable();
+               bh_lock_sock(sk);
+ 
+               if (sock_owned_by_user(sk)) {
+@@ -339,6 +340,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+                       sctp_inq_push(inqueue, chunk);
+ 
+               bh_unlock_sock(sk);
++              local_bh_enable();
+ 
+               /* If the chunk was backloged again, don't drop refs */
+               if (backloged)
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 9d87bba..b335ffc 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -89,12 +89,10 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
+        * Eventually, we should clean up inqueue to not rely
+        * on the BH related data structures.
+        */
+-      local_bh_disable();
+       list_add_tail(&chunk->list, &q->in_chunk_list);
+       if (chunk->asoc)
+               chunk->asoc->stats.ipackets++;
+       q->immediate.func(&q->immediate);
+-      local_bh_enable();
+ }
+ 
+ /* Peek at the next chunk on the inqeue. */
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 67154b8..7f5689a 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4301,6 +4301,7 @@ int sctp_transport_walk_start(struct rhashtable_iter *iter)
+ 
+       err = rhashtable_walk_start(iter);
+       if (err && err != -EAGAIN) {
++              rhashtable_walk_stop(iter);
+               rhashtable_walk_exit(iter);
+               return err;
+       }
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index ad4fa49..9068369 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+                       seq_printf(seq, "%.2x", profile->hash[i]);
+               seq_puts(seq, "\n");
+       }
++      aa_put_profile(profile);
+ 
+       return 0;
+ }

diff --git a/4.7.0/4420_grsecurity-3.1-4.7-201608151842.patch b/4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch
similarity index 99%
rename from 4.7.0/4420_grsecurity-3.1-4.7-201608151842.patch
rename to 4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch
index 9857890..d01aa5c 100644
--- a/4.7.0/4420_grsecurity-3.1-4.7-201608151842.patch
+++ b/4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch
@@ -420,7 +420,7 @@ index a3683ce..5ec8bf4 100644
  
  A toggle value indicating if modules are allowed to be loaded
 diff --git a/Makefile b/Makefile
-index 66da9a3..69d3a1ae 100644
+index 84335c0..6cb42d3 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -36009,7 +36009,7 @@ index 2ae8584..e8f8f29 100644
  #endif /* CONFIG_HUGETLB_PAGE */
  
 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 372aad2..15d0667 100644
+index dffd162d..f2be185 100644
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
 @@ -4,6 +4,7 @@
@@ -36812,7 +36812,7 @@ index b4f2e7e..96c9c3e 100644
  
        pte = kmemcheck_pte_lookup(address);
 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index d2dc043..4bc6d52 100644
+index d2dc043..41dfc2b 100644
 --- a/arch/x86/mm/mmap.c
 +++ b/arch/x86/mm/mmap.c
 @@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
@@ -36824,7 +36824,7 @@ index d2dc043..4bc6d52 100644
  
  static int mmap_is_legacy(void)
  {
-@@ -81,16 +81,41 @@ unsigned long arch_mmap_rnd(void)
+@@ -81,16 +81,31 @@ unsigned long arch_mmap_rnd(void)
        return rnd << PAGE_SHIFT;
  }
  
@@ -36848,27 +36848,17 @@ index d2dc043..4bc6d52 100644
 +      return PAGE_ALIGN(pax_task_size - gap - rnd);
 +}
 +
-+/*
-+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
-+ * does, but not when emulating X86_32
-+ */
 +static unsigned long mmap_legacy_base(struct mm_struct *mm, unsigned long rnd)
 +{
-+      if (mmap_is_ia32()) {
-+
 +#ifdef CONFIG_PAX_SEGMEXEC
-+              if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+                      return SEGMEXEC_TASK_UNMAPPED_BASE;
-+              else
++      if (mmap_is_ia32() && (mm->pax_flags & MF_PAX_SEGMEXEC))
++              return SEGMEXEC_TASK_UNMAPPED_BASE + rnd;
 +#endif
-+
-+              return TASK_UNMAPPED_BASE;
-+      } else
-+              return TASK_UNMAPPED_BASE + rnd;
++      return TASK_UNMAPPED_BASE + rnd;
  }
  
  /*
-@@ -101,18 +126,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -101,18 +116,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  {
        unsigned long random_factor = 0UL;
  
@@ -39745,7 +39735,7 @@ index 556826a..4e7c5fd 100644
                        err = -EFAULT;
                        goto out;
 diff --git a/block/genhd.c b/block/genhd.c
-index 9f42526..fcc8648 100644
+index 3eebd25..e8524d8 100644
 --- a/block/genhd.c
 +++ b/block/genhd.c
 @@ -471,21 +471,24 @@ static char *bdevt_str(dev_t devt, char *buf)
@@ -40009,10 +39999,10 @@ index f550b5d..8488beb 100644
                err = blkcipher_walk_done(desc, &walk, 0);
        }
 diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
-index ea5815c..5880da6 100644
+index bc769c4..862da0f 100644
 --- a/crypto/scatterwalk.c
 +++ b/crypto/scatterwalk.c
-@@ -109,14 +109,20 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+@@ -110,14 +110,20 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
  {
        struct scatter_walk walk;
        struct scatterlist tmp[2];
@@ -43328,7 +43318,7 @@ index d28922d..3c343d6 100644
  
        if (cmd != SIOCWANDEV)
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 0158d3b..69116a2 100644
+index 87ab9f6..4ce0b84 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -290,9 +290,6 @@
@@ -43363,7 +43353,7 @@ index 0158d3b..69116a2 100644
                        unsigned int add =
                                ((pool_size - entropy_count)*anfrac*3) >> s;
  
-@@ -1228,7 +1225,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1231,7 +1228,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  
                extract_buf(r, tmp);
                i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -43372,7 +43362,7 @@ index 0158d3b..69116a2 100644
                        ret = -EFAULT;
                        break;
                }
-@@ -1649,7 +1646,7 @@ static char sysctl_bootid[16];
+@@ -1650,7 +1647,7 @@ static char sysctl_bootid[16];
  static int proc_do_uuid(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -43381,7 +43371,7 @@ index 0158d3b..69116a2 100644
        unsigned char buf[64], tmp_uuid[16], *uuid;
  
        uuid = table->data;
-@@ -1679,7 +1676,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
+@@ -1680,7 +1677,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
  static int proc_do_entropy(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -63635,10 +63625,10 @@ index a400288..0c59bcd 100644
         .init = loopback_net_init,
  };
 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index 8bcd78f..a8a3ed1 100644
+index a70b6c4..a822225 100644
 --- a/drivers/net/macsec.c
 +++ b/drivers/net/macsec.c
-@@ -3295,7 +3295,7 @@ nla_put_failure:
+@@ -3296,7 +3296,7 @@ nla_put_failure:
        return -EMSGSIZE;
  }
  
@@ -98777,7 +98767,7 @@ index 281b768..f39dcdf 100644
                return 0;
        while (nr) {
 diff --git a/fs/dcache.c b/fs/dcache.c
-index d6847d7..448382e 100644
+index 1ed81bb..85b5276 100644
 --- a/fs/dcache.c
 +++ b/fs/dcache.c
 @@ -339,8 +339,9 @@ static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
@@ -98823,7 +98813,7 @@ index d6847d7..448382e 100644
  {
        struct inode *inode = dentry->d_inode;
        struct dentry *parent = NULL;
-@@ -631,7 +634,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
+@@ -630,7 +633,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
        struct dentry *parent = dentry->d_parent;
        if (IS_ROOT(dentry))
                return NULL;
@@ -98832,7 +98822,7 @@ index d6847d7..448382e 100644
                return NULL;
        if (likely(spin_trylock(&parent->d_lock)))
                return parent;
-@@ -693,8 +696,8 @@ static inline bool fast_dput(struct dentry *dentry)
+@@ -692,8 +695,8 @@ static inline bool fast_dput(struct dentry *dentry)
         */
        if (unlikely(ret < 0)) {
                spin_lock(&dentry->d_lock);
@@ -98843,7 +98833,7 @@ index d6847d7..448382e 100644
                        spin_unlock(&dentry->d_lock);
                        return 1;
                }
-@@ -749,7 +752,7 @@ static inline bool fast_dput(struct dentry *dentry)
+@@ -748,7 +751,7 @@ static inline bool fast_dput(struct dentry *dentry)
         * else could have killed it and marked it dead. Either way, we
         * don't need to do anything else.
         */
@@ -98852,7 +98842,7 @@ index d6847d7..448382e 100644
                spin_unlock(&dentry->d_lock);
                return 1;
        }
-@@ -759,7 +762,7 @@ static inline bool fast_dput(struct dentry *dentry)
+@@ -758,7 +761,7 @@ static inline bool fast_dput(struct dentry *dentry)
         * lock, and we just tested that it was zero, so we can just
         * set it to 1.
         */
@@ -98861,7 +98851,7 @@ index d6847d7..448382e 100644
        return 0;
  }
  
-@@ -823,7 +826,7 @@ repeat:
+@@ -824,7 +827,7 @@ repeat:
                dentry->d_flags |= DCACHE_REFERENCED;
        dentry_lru_add(dentry);
  
@@ -98870,7 +98860,7 @@ index d6847d7..448382e 100644
        spin_unlock(&dentry->d_lock);
        return;
  
-@@ -838,7 +841,7 @@ EXPORT_SYMBOL(dput);
+@@ -841,7 +844,7 @@ EXPORT_SYMBOL(dput);
  /* This must be called with d_lock held */
  static inline void __dget_dlock(struct dentry *dentry)
  {
@@ -98879,7 +98869,7 @@ index d6847d7..448382e 100644
  }
  
  static inline void __dget(struct dentry *dentry)
-@@ -879,8 +882,8 @@ repeat:
+@@ -882,8 +885,8 @@ repeat:
                goto repeat;
        }
        rcu_read_unlock();
@@ -98890,7 +98880,7 @@ index d6847d7..448382e 100644
        spin_unlock(&ret->d_lock);
        return ret;
  }
-@@ -958,9 +961,9 @@ restart:
+@@ -961,9 +964,9 @@ restart:
        spin_lock(&inode->i_lock);
        hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
                spin_lock(&dentry->d_lock);
@@ -98902,7 +98892,7 @@ index d6847d7..448382e 100644
                                __dentry_kill(dentry);
                                dput(parent);
                                goto restart;
-@@ -995,7 +998,7 @@ static void shrink_dentry_list(struct list_head *list)
+@@ -998,7 +1001,7 @@ static void shrink_dentry_list(struct list_head *list)
                 * We found an inuse dentry which was not removed from
                 * the LRU because of laziness during lookup. Do not free it.
                 */
@@ -98911,7 +98901,7 @@ index d6847d7..448382e 100644
                        spin_unlock(&dentry->d_lock);
                        if (parent)
                                spin_unlock(&parent->d_lock);
-@@ -1033,8 +1036,8 @@ static void shrink_dentry_list(struct list_head *list)
+@@ -1036,8 +1039,8 @@ static void shrink_dentry_list(struct list_head *list)
                dentry = parent;
                while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
                        parent = lock_parent(dentry);
@@ -98922,7 +98912,7 @@ index d6847d7..448382e 100644
                                spin_unlock(&dentry->d_lock);
                                if (parent)
                                        spin_unlock(&parent->d_lock);
-@@ -1074,7 +1077,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
+@@ -1077,7 +1080,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
         * counts, just remove them from the LRU. Otherwise give them
         * another pass through the LRU.
         */
@@ -98931,7 +98921,7 @@ index d6847d7..448382e 100644
                d_lru_isolate(lru, dentry);
                spin_unlock(&dentry->d_lock);
                return LRU_REMOVED;
-@@ -1411,7 +1414,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+@@ -1414,7 +1417,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
        } else {
                if (dentry->d_flags & DCACHE_LRU_LIST)
                        d_lru_del(dentry);
@@ -98940,7 +98930,7 @@ index d6847d7..448382e 100644
                        d_shrink_add(dentry, &data->dispose);
                        data->found++;
                }
-@@ -1459,7 +1462,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1462,7 +1465,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
                return D_WALK_CONTINUE;
  
        /* root with refcount 1 is fine */
@@ -98949,7 +98939,7 @@ index d6847d7..448382e 100644
                return D_WALK_CONTINUE;
  
        printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
-@@ -1468,7 +1471,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1471,7 +1474,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
                       dentry->d_inode ?
                       dentry->d_inode->i_ino : 0UL,
                       dentry,
@@ -98958,7 +98948,7 @@ index d6847d7..448382e 100644
                       dentry->d_sb->s_type->name,
                       dentry->d_sb->s_id);
        WARN_ON(1);
-@@ -1613,7 +1616,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1616,7 +1619,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
                dname = dentry->d_iname;
        } else if (name->len > DNAME_INLINE_LEN-1) {
                size_t size = offsetof(struct external_name, name[1]);
@@ -98967,7 +98957,7 @@ index d6847d7..448382e 100644
                                                  GFP_KERNEL_ACCOUNT);
                if (!p) {
                        kmem_cache_free(dentry_cache, dentry); 
-@@ -1637,7 +1640,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1640,7 +1643,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        smp_wmb();
        dentry->d_name.name = dname;
  
@@ -98976,7 +98966,7 @@ index d6847d7..448382e 100644
        dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
        seqcount_init(&dentry->d_seq);
-@@ -1646,6 +1649,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1649,6 +1652,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        dentry->d_sb = sb;
        dentry->d_op = NULL;
        dentry->d_fsdata = NULL;
@@ -98986,7 +98976,7 @@ index d6847d7..448382e 100644
        INIT_HLIST_BL_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
-@@ -2311,7 +2317,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+@@ -2314,7 +2320,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
                                goto next;
                }
  
@@ -98995,7 +98985,7 @@ index d6847d7..448382e 100644
                found = dentry;
                spin_unlock(&dentry->d_lock);
                break;
-@@ -2379,7 +2385,7 @@ again:
+@@ -2382,7 +2388,7 @@ again:
        spin_lock(&dentry->d_lock);
        inode = dentry->d_inode;
        isdir = S_ISDIR(inode->i_mode);
@@ -99004,7 +98994,7 @@ index d6847d7..448382e 100644
                if (!spin_trylock(&inode->i_lock)) {
                        spin_unlock(&dentry->d_lock);
                        cpu_relax();
-@@ -3598,7 +3604,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3601,7 +3607,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
  
                if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
                        dentry->d_flags |= DCACHE_GENOCIDE;
@@ -99013,7 +99003,7 @@ index d6847d7..448382e 100644
                }
        }
        return D_WALK_CONTINUE;
-@@ -3706,7 +3712,8 @@ void __init vfs_caches_init_early(void)
+@@ -3709,7 +3715,8 @@ void __init vfs_caches_init_early(void)
  void __init vfs_caches_init(void)
  {
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -100053,10 +100043,10 @@ index 1a5e3bf..3229306 100644
  cleanup:
        brelse(bh);
 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index 3020fd7..6f1b8a2 100644
+index 1ea5054..093f363 100644
 --- a/fs/ext4/balloc.c
 +++ b/fs/ext4/balloc.c
-@@ -563,8 +563,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+@@ -566,8 +566,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
@@ -100102,10 +100092,10 @@ index b84aa1c..36fd3b0 100644
  
        /* locality groups */
 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index 2a2eef9..f37183d 100644
+index d7ccb7f..1b9329a 100644
 --- a/fs/ext4/extents.c
 +++ b/fs/ext4/extents.c
-@@ -868,7 +868,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+@@ -876,7 +876,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
@@ -100115,7 +100105,7 @@ index 2a2eef9..f37183d 100644
  
        eh = ext_inode_hdr(inode);
 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index c1ab3ec..d5064c9 100644
+index 7f42eda..0150cd7 100644
 --- a/fs/ext4/mballoc.c
 +++ b/fs/ext4/mballoc.c
 @@ -1921,7 +1921,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -100274,7 +100264,7 @@ index cf68100..f96c5c0 100644
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
                if (unlikely(err))
 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 3822a5a..de73791 100644
+index 639bd756..c83cfde 100644
 --- a/fs/ext4/super.c
 +++ b/fs/ext4/super.c
 @@ -1307,7 +1307,7 @@ static ext4_fsblk_t get_sb_block(void **data)
@@ -102195,10 +102185,10 @@ index cbece12..9b01171 100644
        }
  
 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
-index 9154f86..d653e02 100644
+index 6cac3dc..e3ad955 100644
 --- a/fs/fuse/file.c
 +++ b/fs/fuse/file.c
-@@ -825,9 +825,9 @@ struct fuse_fill_data {
+@@ -849,9 +849,9 @@ struct fuse_fill_data {
        unsigned nr_pages;
  };
  
@@ -102211,7 +102201,7 @@ index 9154f86..d653e02 100644
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
-index 9961d843..e3981b0 100644
+index 9b7cb37..01323e0 100644
 --- a/fs/fuse/inode.c
 +++ b/fs/fuse/inode.c
 @@ -29,7 +29,7 @@ static struct kmem_cache *fuse_inode_cachep;
@@ -102438,7 +102428,7 @@ index 4ea71eb..19effa7 100644
  static int can_do_hugetlb_shm(void)
  {
 diff --git a/fs/inode.c b/fs/inode.c
-index 4ccbc21..2a84b51 100644
+index 9ea4219..68587e2 100644
 --- a/fs/inode.c
 +++ b/fs/inode.c
 @@ -851,19 +851,19 @@ unsigned int get_next_ino(void)
@@ -102467,18 +102457,6 @@ index 4ccbc21..2a84b51 100644
        *p = res;
        put_cpu_var(last_ino);
        return res;
-diff --git a/fs/ioctl.c b/fs/ioctl.c
-index 116a333..0f56deb 100644
---- a/fs/ioctl.c
-+++ b/fs/ioctl.c
-@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
-               goto out;
-       }
- 
-+      same->dest_count = count;
-       ret = vfs_dedupe_file_range(file, same);
-       if (ret)
-               goto out;
 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
 index 7007809..8b69fb9 100644
 --- a/fs/jbd2/commit.c
@@ -139152,7 +139130,7 @@ index ade739f..b6c2f20 100644
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 diff --git a/ipc/msg.c b/ipc/msg.c
-index 1471db9..dbdcf7f 100644
+index c6521c2..4e2379d 100644
 --- a/ipc/msg.c
 +++ b/ipc/msg.c
 @@ -1041,7 +1041,8 @@ void msg_exit_ns(struct ipc_namespace *ns)
@@ -139188,7 +139166,7 @@ index ed81aaf..4bb6792 100644
                        goto out_err;
                *pseg = seg;
 diff --git a/ipc/sem.c b/ipc/sem.c
-index b3757ea..387f432 100644
+index 5d2f875..fe6a467 100644
 --- a/ipc/sem.c
 +++ b/ipc/sem.c
 @@ -1799,7 +1799,7 @@ static int get_queue_result(struct sem_queue *q)
@@ -147219,7 +147197,7 @@ index 27fe749..2c0e855 100644
  static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
  
 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
-index 8b7d845..d6f36af 100644
+index bc7852f..ab8f21e 100644
 --- a/lib/radix-tree.c
 +++ b/lib/radix-tree.c
 @@ -64,7 +64,7 @@ struct radix_tree_preload {
@@ -148209,7 +148187,7 @@ index 93fb63e..0aa6448 100644
        if (end == start)
                return error;
 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 5339c89..19e7895 100644
+index ca847d9..3582387 100644
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
 @@ -723,7 +723,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
@@ -156749,7 +156727,7 @@ index debdd8b..7bcef89 100644
                                  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
                /* Has it gone just too far? */
 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 4aed8fc..7072373 100644
+index e61f7cd..8206e7a 100644
 --- a/net/ipv4/udp.c
 +++ b/net/ipv4/udp.c
 @@ -87,6 +87,7 @@
@@ -156824,7 +156802,7 @@ index 4aed8fc..7072373 100644
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_INERRORS, is_udplite);
                }
-@@ -1611,7 +1631,7 @@ csum_error:
+@@ -1609,7 +1629,7 @@ csum_error:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
  drop:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -156833,7 +156811,7 @@ index 4aed8fc..7072373 100644
        kfree_skb(skb);
        return -1;
  }
-@@ -1669,7 +1689,7 @@ start_lookup:
+@@ -1667,7 +1687,7 @@ start_lookup:
                nskb = skb_clone(skb, GFP_ATOMIC);
  
                if (unlikely(!nskb)) {
@@ -156842,7 +156820,7 @@ index 4aed8fc..7072373 100644
                        __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                        IS_UDPLITE(sk));
                        __UDP_INC_STATS(net, UDP_MIB_INERRORS,
-@@ -1810,6 +1830,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -1808,6 +1828,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                goto csum_error;
  
        __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -156852,7 +156830,7 @@ index 4aed8fc..7072373 100644
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
  
        /*
-@@ -2375,7 +2398,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -2373,7 +2396,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
@@ -156972,7 +156950,7 @@ index 542074c..648df74 100644
               sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
  
 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index 47f837a..95f8192 100644
+index 047c75a..bc3f19c 100644
 --- a/net/ipv6/addrconf.c
 +++ b/net/ipv6/addrconf.c
 @@ -179,7 +179,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
@@ -157011,7 +156989,7 @@ index 47f837a..95f8192 100644
  
                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();
-@@ -4025,16 +4025,23 @@ static const struct file_operations if6_fops = {
+@@ -4029,16 +4029,23 @@ static const struct file_operations if6_fops = {
        .release        = seq_release_net,
  };
  
@@ -157036,7 +157014,7 @@ index 47f837a..95f8192 100644
  }
  
  static struct pernet_operations if6_proc_net_ops = {
-@@ -4653,7 +4660,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+@@ -4657,7 +4664,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        s_ip_idx = ip_idx = cb->args[2];
  
        rcu_read_lock();
@@ -157045,7 +157023,7 @@ index 47f837a..95f8192 100644
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-@@ -4866,7 +4873,7 @@ static inline size_t inet6_if_nlmsg_size(void)
+@@ -4870,7 +4877,7 @@ static inline size_t inet6_if_nlmsg_size(void)
               + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
  }
  
@@ -157054,7 +157032,7 @@ index 47f837a..95f8192 100644
                                      int items, int bytes)
  {
        int i;
-@@ -4876,7 +4883,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
+@@ -4880,7 +4887,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
        /* Use put_unaligned() because stats may not be aligned for u64. */
        put_unaligned(items, &stats[0]);
        for (i = 1; i < items; i++)
@@ -157063,7 +157041,7 @@ index 47f837a..95f8192 100644
  
        memset(&stats[items], 0, pad);
  }
-@@ -5333,7 +5340,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+@@ -5337,7 +5344,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                rt_genid_bump_ipv6(net);
                break;
        }
@@ -157072,7 +157050,7 @@ index 47f837a..95f8192 100644
  }
  
  static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
-@@ -5353,7 +5360,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+@@ -5357,7 +5364,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
@@ -157081,7 +157059,7 @@ index 47f837a..95f8192 100644
        int ret;
  
        /*
-@@ -5376,7 +5383,7 @@ static
+@@ -5380,7 +5387,7 @@ static
  int addrconf_sysctl_hop_limit(struct ctl_table *ctl, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -157090,7 +157068,7 @@ index 47f837a..95f8192 100644
        int min_hl = 1, max_hl = 255;
  
        lctl = *ctl;
-@@ -5392,7 +5399,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+@@ -5396,7 +5403,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
  {
        struct inet6_dev *idev = ctl->extra1;
        int min_mtu = IPV6_MIN_MTU;
@@ -157099,7 +157077,7 @@ index 47f837a..95f8192 100644
  
        lctl = *ctl;
        lctl.extra1 = &min_mtu;
-@@ -5467,7 +5474,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
+@@ -5471,7 +5478,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
@@ -157108,7 +157086,7 @@ index 47f837a..95f8192 100644
        int ret;
  
        /*
-@@ -5532,7 +5539,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+@@ -5536,7 +5543,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
        int err;
        struct in6_addr addr;
        char str[IPV6_MAX_STRLEN];
@@ -157117,7 +157095,7 @@ index 47f837a..95f8192 100644
        struct net *net = ctl->extra2;
        struct ipv6_stable_secret *secret = ctl->data;
  
-@@ -5601,7 +5608,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+@@ -5605,7 +5612,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
@@ -157126,7 +157104,7 @@ index 47f837a..95f8192 100644
        int ret;
  
        /* ctl->data points to idev->cnf.ignore_routes_when_linkdown
-@@ -5941,7 +5948,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
+@@ -5945,7 +5952,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                struct inet6_dev *idev, struct ipv6_devconf *p)
  {
        int i;
@@ -157744,7 +157722,7 @@ index 2255d2b..b1c03a4 100644
        }
  
 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index acc09705..b43ebf5 100644
+index 42a2edf..63f570a 100644
 --- a/net/ipv6/udp.c
 +++ b/net/ipv6/udp.c
 @@ -78,6 +78,10 @@ static u32 udp6_ehashfn(const struct net *net,
@@ -157767,7 +157745,7 @@ index acc09705..b43ebf5 100644
                        if (is_udp4)
                                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
                                              is_udplite);
-@@ -648,7 +652,7 @@ csum_error:
+@@ -646,7 +650,7 @@ csum_error:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
  drop:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -157776,7 +157754,7 @@ index acc09705..b43ebf5 100644
        kfree_skb(skb);
        return -1;
  }
-@@ -729,7 +733,7 @@ start_lookup:
+@@ -727,7 +731,7 @@ start_lookup:
                }
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!nskb)) {
@@ -157785,7 +157763,7 @@ index acc09705..b43ebf5 100644
                        __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
-@@ -847,6 +851,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -845,6 +849,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                goto csum_error;
  
        __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -161067,7 +161045,7 @@ index aa37122..2b9780d 100644
                NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
        };
 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 67154b8..8550750 100644
+index 7f5689a..a7e4ec7 100644
 --- a/net/sctp/socket.c
 +++ b/net/sctp/socket.c
 @@ -2190,11 +2190,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
@@ -161085,7 +161063,7 @@ index 67154b8..8550750 100644
  
        /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
         * if there is no data to be sent or retransmit, the stack will
-@@ -4586,13 +4588,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+@@ -4587,13 +4589,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
  static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
                                  int __user *optlen)
  {
@@ -161103,7 +161081,7 @@ index 67154b8..8550750 100644
                return -EFAULT;
        return 0;
  }
-@@ -4610,6 +4615,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+@@ -4611,6 +4616,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
   */
  static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
  {
@@ -161112,7 +161090,7 @@ index 67154b8..8550750 100644
        /* Applicable to UDP-style socket only */
        if (sctp_style(sk, TCP))
                return -EOPNOTSUPP;
-@@ -4618,7 +4625,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
+@@ -4619,7 +4626,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
        len = sizeof(int);
        if (put_user(len, optlen))
                return -EFAULT;
@@ -161122,7 +161100,7 @@ index 67154b8..8550750 100644
                return -EFAULT;
        return 0;
  }
-@@ -4992,12 +5000,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
+@@ -4993,12 +5001,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
   */
  static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
  {
@@ -161139,7 +161117,7 @@ index 67154b8..8550750 100644
                return -EFAULT;
        return 0;
  }
-@@ -5038,6 +5049,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+@@ -5039,6 +5050,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
                              ->addr_to_user(sp, &temp);
                if (space_left < addrlen)
                        return -ENOMEM;

diff --git a/4.7.0/4425_grsec_remove_EI_PAX.patch b/4.7.1/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.7.0/4425_grsec_remove_EI_PAX.patch
rename to 4.7.1/4425_grsec_remove_EI_PAX.patch

diff --git a/4.7.0/4427_force_XATTR_PAX_tmpfs.patch b/4.7.1/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.7.0/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.7.1/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.7.0/4430_grsec-remove-localversion-grsec.patch b/4.7.1/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.7.0/4430_grsec-remove-localversion-grsec.patch
rename to 4.7.1/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.7.0/4435_grsec-mute-warnings.patch b/4.7.1/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.7.0/4435_grsec-mute-warnings.patch
rename to 4.7.1/4435_grsec-mute-warnings.patch

diff --git a/4.7.0/4440_grsec-remove-protected-paths.patch b/4.7.1/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.7.0/4440_grsec-remove-protected-paths.patch
rename to 4.7.1/4440_grsec-remove-protected-paths.patch

diff --git a/4.7.0/4450_grsec-kconfig-default-gids.patch b/4.7.1/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.7.0/4450_grsec-kconfig-default-gids.patch
rename to 4.7.1/4450_grsec-kconfig-default-gids.patch

diff --git a/4.7.0/4465_selinux-avc_audit-log-curr_ip.patch b/4.7.1/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.7.0/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.7.1/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.7.0/4470_disable-compat_vdso.patch b/4.7.1/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.7.0/4470_disable-compat_vdso.patch
rename to 4.7.1/4470_disable-compat_vdso.patch

diff --git a/4.7.0/4475_emutramp_default_on.patch b/4.7.1/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.7.0/4475_emutramp_default_on.patch
rename to 4.7.1/4475_emutramp_default_on.patch
