commit:     35c2ea6ad304109f6f512a74f585da8d9194ebae
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb  6 20:14:16 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb  6 20:14:16 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=35c2ea6a

proj/linux-patches: Linux patch 4.9.155

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1154_linux-4.9.155.patch | 1021 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1025 insertions(+)

diff --git a/0000_README b/0000_README
index dfd18f6..0ed3743 100644
--- a/0000_README
+++ b/0000_README
@@ -659,6 +659,10 @@ Patch:  1153_linux-4.9.154.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.154
 
+Patch:  1154_linux-4.9.155.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.155
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1154_linux-4.9.155.patch b/1154_linux-4.9.155.patch
new file mode 100644
index 0000000..b8dc104
--- /dev/null
+++ b/1154_linux-4.9.155.patch
@@ -0,0 +1,1021 @@
+diff --git a/Makefile b/Makefile
+index 9964792e200f..1933ac9c3406 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 154
++SUBLEVEL = 155
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 318394ed5c7a..5e11ad3164e0 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
+       } else /* remote PCI bus */
+               base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
+ 
+-      return base + (where & 0xffc) + (devfn << 12);
++      return base + where + (devfn << 12);
+ }
+ 
+ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index f6e71c73cceb..76c9b51fa7f1 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -297,8 +297,10 @@ int swsusp_arch_suspend(void)
+               dcache_clean_range(__idmap_text_start, __idmap_text_end);
+ 
+               /* Clean kvm setup code to PoC? */
+-              if (el2_reset_needed())
++              if (el2_reset_needed()) {
+                       dcache_clean_range(__hyp_idmap_text_start, 
__hyp_idmap_text_end);
++                      dcache_clean_range(__hyp_text_start, __hyp_text_end);
++              }
+ 
+               /*
+                * Tell the hibernation core that we've just restored
+diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
+index d3b5f75e652e..fcb486d09555 100644
+--- a/arch/arm64/kernel/hyp-stub.S
++++ b/arch/arm64/kernel/hyp-stub.S
+@@ -28,6 +28,8 @@
+ #include <asm/virt.h>
+ 
+       .text
++      .pushsection    .hyp.text, "ax"
++
+       .align 11
+ 
+ ENTRY(__hyp_stub_vectors)
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index 2a21318fed1d..c9ca903462a6 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+        * we end up running with module randomization disabled.
+        */
+       module_alloc_base = (u64)_etext - MODULES_VSIZE;
++      __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ 
+       /*
+        * Try to map the FDT early. If this fails, we simply bail,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index f43caad30e1e..901aec4bb01d 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -862,6 +862,8 @@ static void cleanup_glue_dir(struct device *dev, struct 
kobject *glue_dir)
+               return;
+ 
+       mutex_lock(&gdp_mutex);
++      if (!kobject_has_children(glue_dir))
++              kobject_del(glue_dir);
+       kobject_put(glue_dir);
+       mutex_unlock(&gdp_mutex);
+ }
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index 524c8e0b72fd..40bdeca6d692 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -242,7 +242,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
+ 
+       iproc_host->data = iproc_data;
+ 
+-      mmc_of_parse(host->mmc);
++      ret = mmc_of_parse(host->mmc);
++      if (ret)
++              goto err;
++
+       sdhci_get_of_property(pdev);
+ 
+       host->mmc->caps |= iproc_host->data->mmc_caps;
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c 
b/drivers/net/ethernet/freescale/ucc_geth.c
+index ef9bc26ebc1a..714593023bbc 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private 
*ugeth)
+       u16 i, j;
+       u8 __iomem *bd;
+ 
++      netdev_reset_queue(ugeth->ndev);
++
+       ug_info = ugeth->ug_info;
+       uf_info = &ug_info->uf_info;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c 
b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index 84bab9f0732e..9af0887c8a29 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -2037,9 +2037,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ {
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
++      u64 qword_field;
+       u32 dword_field;
+-      int err;
++      u16 word_field;
+       u8 byte_field;
++      int err;
+       static const u8 a0_dmfs_query_hw_steering[] =  {
+               [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+               [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+@@ -2067,19 +2069,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ 
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+ 
+-      MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+-      MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+-      MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+-      MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+-      MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+-      MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+-      MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+-      MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+-      MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
++      MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
++      param->qpc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
++      param->log_num_qps = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
++      param->srqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
++      param->log_num_srqs = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
++      param->cqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
++      param->log_num_cqs = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
++      param->altc_base = qword_field;
++      MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
++      param->auxc_base = qword_field;
++      MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
++      param->eqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
++      param->log_num_eqs = byte_field & 0x1f;
++      MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
++      param->num_sys_eqs = word_field & 0xfff;
++      MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
++      param->rdmarc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
++      param->log_rd_per_qp = byte_field & 0x7;
+ 
+       MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+       if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+@@ -2098,22 +2113,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+       /* steering attributes */
+       if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+-              MLX4_GET(param->log_mc_entry_sz, outbox,
+-                       INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_table_sz, outbox,
+-                       INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+-              MLX4_GET(byte_field, outbox,
+-                       INIT_HCA_FS_A0_OFFSET);
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
++              param->log_mc_entry_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
++              param->log_mc_table_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
+               param->dmfs_high_steer_mode =
+                       a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
+       } else {
+               MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+-              MLX4_GET(param->log_mc_entry_sz, outbox,
+-                       INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_hash_sz,  outbox,
+-                       INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_table_sz, outbox,
+-                       INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++              MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
++              param->log_mc_entry_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
++              param->log_mc_hash_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++              param->log_mc_table_sz = byte_field & 0x1f;
+       }
+ 
+       /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+@@ -2137,15 +2151,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+       /* TPT attributes */
+ 
+       MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+-      MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
+-      MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++      MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
++      param->mw_enabled = byte_field >> 7;
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++      param->log_mpt_sz = byte_field & 0x3f;
+       MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+ 
+       /* UAR attributes */
+ 
+       MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+-      MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++      param->log_uar_sz = byte_field & 0xf;
+ 
+       /* phv_check enable */
+       MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 5d6eab19a9d8..da9246f6c31e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1216,14 +1216,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch 
*esw,
+       int err = 0;
+       u8 *smac_v;
+ 
+-      if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+-              mlx5_core_warn(esw->dev,
+-                             "vport[%d] configure ingress rules failed, 
illegal mac with spoofchk\n",
+-                             vport->vport);
+-              return -EPERM;
+-
+-      }
+-
+       esw_vport_cleanup_ingress_rules(esw, vport);
+ 
+       if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+@@ -1709,13 +1701,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch 
*esw,
+       mutex_lock(&esw->state_lock);
+       evport = &esw->vports[vport];
+ 
+-      if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
++      if (evport->info.spoofchk && !is_valid_ether_addr(mac))
+               mlx5_core_warn(esw->dev,
+-                             "MAC invalidation is not allowed when spoofchk 
is on, vport(%d)\n",
++                             "Set invalid MAC while spoofchk is on, 
vport(%d)\n",
+                              vport);
+-              err = -EPERM;
+-              goto unlock;
+-      }
+ 
+       err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+       if (err) {
+@@ -1859,6 +1848,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch 
*esw,
+       evport = &esw->vports[vport];
+       pschk = evport->info.spoofchk;
+       evport->info.spoofchk = spoofchk;
++      if (pschk && !is_valid_ether_addr(evport->info.mac))
++              mlx5_core_warn(esw->dev,
++                             "Spoofchk in set while MAC is invalid, 
vport(%d)\n",
++                             evport->vport);
+       if (evport->enabled && esw->mode == SRIOV_LEGACY)
+               err = esw_vport_ingress_config(esw, evport);
+       if (err)
+diff --git a/drivers/net/ipvlan/ipvlan_main.c 
b/drivers/net/ipvlan/ipvlan_main.c
+index b299277361b7..4a2609c4dd6e 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -85,12 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, 
u16 nval)
+                       err = ipvlan_register_nf_hook();
+                       if (!err) {
+                               mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+-                              mdev->priv_flags |= IFF_L3MDEV_MASTER;
++                              mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
+                       } else
+                               goto fail;
+               } else if (port->mode == IPVLAN_MODE_L3S) {
+                       /* Old mode was L3S */
+-                      mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
++                      mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+                       ipvlan_unregister_nf_hook();
+                       mdev->l3mdev_ops = NULL;
+               }
+@@ -158,7 +158,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
+ 
+       dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+       if (port->mode == IPVLAN_MODE_L3S) {
+-              dev->priv_flags &= ~IFF_L3MDEV_MASTER;
++              dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+               ipvlan_unregister_nf_hook();
+               dev->l3mdev_ops = NULL;
+       }
+diff --git a/drivers/platform/x86/asus-nb-wmi.c 
b/drivers/platform/x86/asus-nb-wmi.c
+index c857d2d7bbec..69ffbd7b76f7 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -477,8 +477,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+       { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+       { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0x32, { KEY_MUTE } },
+-      { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+-      { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
++      { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+       { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+       { KE_KEY, 0x41, { KEY_NEXTSONG } },
+       { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 441d434a48c1..33e65b71c49a 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -48,6 +48,7 @@
+ #include "cifs_unicode.h"
+ #include "cifs_debug.h"
+ #include "cifs_fs_sb.h"
++#include "dns_resolve.h"
+ #include "ntlmssp.h"
+ #include "nterr.h"
+ #include "rfc1002pdu.h"
+@@ -306,6 +307,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
+ static int cifs_setup_volume_info(struct smb_vol *volume_info, char 
*mount_data,
+                                       const char *devname);
+ 
++/*
++ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
++ * get their ip addresses changed at some point.
++ *
++ * This should be called with server->srv_mutex held.
++ */
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static int reconn_set_ipaddr(struct TCP_Server_Info *server)
++{
++      int rc;
++      int len;
++      char *unc, *ipaddr = NULL;
++
++      if (!server->hostname)
++              return -EINVAL;
++
++      len = strlen(server->hostname) + 3;
++
++      unc = kmalloc(len, GFP_KERNEL);
++      if (!unc) {
++              cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
++              return -ENOMEM;
++      }
++      snprintf(unc, len, "\\\\%s", server->hostname);
++
++      rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
++      kfree(unc);
++
++      if (rc < 0) {
++              cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: 
%d\n",
++                       __func__, server->hostname, rc);
++              return rc;
++      }
++
++      rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
++                                strlen(ipaddr));
++      kfree(ipaddr);
++
++      return !rc ? -1 : 0;
++}
++#else
++static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
++{
++      return 0;
++}
++#endif
++
+ /*
+  * cifs tcp session reconnection
+  *
+@@ -403,6 +451,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
+               rc = generic_ip_connect(server);
+               if (rc) {
+                       cifs_dbg(FYI, "reconnect error %d\n", rc);
++                      rc = reconn_set_ipaddr(server);
++                      if (rc) {
++                              cifs_dbg(FYI, "%s: failed to resolve hostname: 
%d\n",
++                                       __func__, rc);
++                      }
+                       mutex_unlock(&server->srv_mutex);
+                       msleep(3000);
+               } else {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 50251a8af0ce..52b6e4a40748 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2686,8 +2686,8 @@ SMB2_query_directory(const unsigned int xid, struct 
cifs_tcon *tcon,
+               if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+                       srch_inf->endOfSearch = true;
+                       rc = 0;
+-              }
+-              cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
++              } else
++                      cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+               goto qdir_exit;
+       }
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index f903b86b06e5..29c0286bd638 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1164,15 +1164,11 @@ static enum lru_status 
dentry_lru_isolate_shrink(struct list_head *item,
+  */
+ void shrink_dcache_sb(struct super_block *sb)
+ {
+-      long freed;
+-
+       do {
+               LIST_HEAD(dispose);
+ 
+-              freed = list_lru_walk(&sb->s_dentry_lru,
++              list_lru_walk(&sb->s_dentry_lru,
+                       dentry_lru_isolate_shrink, &dispose, 1024);
+-
+-              this_cpu_sub(nr_dentry_unused, freed);
+               shrink_dentry_list(&dispose);
+               cond_resched();
+       } while (list_lru_count(&sb->s_dentry_lru) > 0);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 05f1ec728840..073126707270 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -1705,9 +1705,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, 
u32 *minext,
+                       goto next_iter;
+               }
+               if (ret == -E2BIG) {
+-                      n += rbm->bii - initial_bii;
+                       rbm->bii = 0;
+                       rbm->offset = 0;
++                      n += (rbm->bii - initial_bii);
+                       goto res_covered_end_of_rgrp;
+               }
+               return ret;
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index a64adc2fced9..56b4f855fa9b 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -101,9 +101,9 @@ int __fsnotify_parent(struct path *path, struct dentry 
*dentry, __u32 mask)
+       parent = dget_parent(dentry);
+       p_inode = parent->d_inode;
+ 
+-      if (unlikely(!fsnotify_inode_watches_children(p_inode)))
++      if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
+               __fsnotify_update_child_dentry_flags(p_inode);
+-      else if (p_inode->i_fsnotify_mask & mask) {
++      } else if (p_inode->i_fsnotify_mask & mask & ~FS_EVENT_ON_CHILD) {
+               struct name_snapshot name;
+ 
+               /* we are notifying a parent so come up with the new mask which
+@@ -207,6 +207,10 @@ int fsnotify(struct inode *to_tell, __u32 mask, void 
*data, int data_is,
+       else
+               mnt = NULL;
+ 
++      /* An event "on child" is not intended for a mount mark */
++      if (mask & FS_EVENT_ON_CHILD)
++              mnt = NULL;
++
+       /*
+        * Optimization: srcu_read_lock() has a memory barrier which can
+        * be expensive.  It protects walking the *_fsnotify_marks lists.
+diff --git a/fs/read_write.c b/fs/read_write.c
+index ba280596ec78..9819f7c6c8c5 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -392,8 +392,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter 
*iter, loff_t *ppos)
+       iter->type |= WRITE;
+       ret = file->f_op->write_iter(&kiocb, iter);
+       BUG_ON(ret == -EIOCBQUEUED);
+-      if (ret > 0)
++      if (ret > 0) {
+               *ppos = kiocb.ki_pos;
++              fsnotify_modify(file);
++      }
+       return ret;
+ }
+ EXPORT_SYMBOL(vfs_iter_write);
+diff --git a/fs/super.c b/fs/super.c
+index 7e9beab77259..abe2541fb28c 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -119,13 +119,23 @@ static unsigned long super_cache_count(struct shrinker 
*shrink,
+       sb = container_of(shrink, struct super_block, s_shrink);
+ 
+       /*
+-       * Don't call trylock_super as it is a potential
+-       * scalability bottleneck. The counts could get updated
+-       * between super_cache_count and super_cache_scan anyway.
+-       * Call to super_cache_count with shrinker_rwsem held
+-       * ensures the safety of call to list_lru_shrink_count() and
+-       * s_op->nr_cached_objects().
++       * We don't call trylock_super() here as it is a scalability bottleneck,
++       * so we're exposed to partial setup state. The shrinker rwsem does not
++       * protect filesystem operations backing list_lru_shrink_count() or
++       * s_op->nr_cached_objects(). Counts can change between
++       * super_cache_count and super_cache_scan, so we really don't need locks
++       * here.
++       *
++       * However, if we are currently mounting the superblock, the underlying
++       * filesystem might be in a state of partial construction and hence it
++       * is dangerous to access it.  trylock_super() uses a MS_BORN check to
++       * avoid this situation, so do the same here. The memory barrier is
++       * matched with the one in mount_fs() as we don't hold locks here.
+        */
++      if (!(sb->s_flags & MS_BORN))
++              return 0;
++      smp_rmb();
++
+       if (sb->s_op && sb->s_op->nr_cached_objects)
+               total_objects = sb->s_op->nr_cached_objects(sb, sc);
+ 
+@@ -1193,6 +1203,14 @@ mount_fs(struct file_system_type *type, int flags, 
const char *name, void *data)
+       sb = root->d_sb;
+       BUG_ON(!sb);
+       WARN_ON(!sb->s_bdi);
++
++      /*
++       * Write barrier is for super_cache_count(). We place it before setting
++       * MS_BORN as the data dependency between the two functions is the
++       * superblock structure contents that we just set up, not the MS_BORN
++       * flag.
++       */
++      smp_wmb();
+       sb->s_flags |= MS_BORN;
+ 
+       error = security_sb_kern_mount(sb, flags, secdata);
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index e6284591599e..5957c6a3fd7f 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj);
+ extern const void *kobject_namespace(struct kobject *kobj);
+ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+ 
++/**
++ * kobject_has_children - Returns whether a kobject has children.
++ * @kobj: the object to test
++ *
++ * This will return whether a kobject has other kobjects as children.
++ *
++ * It does NOT account for the presence of attribute files, only sub
++ * directories. It also assumes there is no concurrent addition or
++ * removal of such children, and thus relies on external locking.
++ */
++static inline bool kobject_has_children(struct kobject *kobj)
++{
++      WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0);
++
++      return kobj->sd && kobj->sd->dir.subdirs;
++}
++
+ struct kobj_type {
+       void (*release)(struct kobject *kobj);
+       const struct sysfs_ops *sysfs_ops;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index f254982e1a8f..2ecf0f32444e 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1368,6 +1368,7 @@ struct net_device_ops {
+  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+  *    entity (i.e. the master device for bridged veth)
+  * @IFF_MACSEC: device is a MACsec device
++ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+  */
+ enum netdev_priv_flags {
+       IFF_802_1Q_VLAN                 = 1<<0,
+@@ -1398,6 +1399,7 @@ enum netdev_priv_flags {
+       IFF_RXFH_CONFIGURED             = 1<<25,
+       IFF_PHONY_HEADROOM              = 1<<26,
+       IFF_MACSEC                      = 1<<27,
++      IFF_L3MDEV_RX_HANDLER           = 1<<28,
+ };
+ 
+ #define IFF_802_1Q_VLAN                       IFF_802_1Q_VLAN
+@@ -1427,6 +1429,7 @@ enum netdev_priv_flags {
+ #define IFF_TEAM                      IFF_TEAM
+ #define IFF_RXFH_CONFIGURED           IFF_RXFH_CONFIGURED
+ #define IFF_MACSEC                    IFF_MACSEC
++#define IFF_L3MDEV_RX_HANDLER         IFF_L3MDEV_RX_HANDLER
+ 
+ /**
+  *    struct net_device - The DEVICE structure.
+@@ -4244,6 +4247,11 @@ static inline bool netif_supports_nofcs(struct 
net_device *dev)
+       return dev->priv_flags & IFF_SUPP_NOFCS;
+ }
+ 
++static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
++{
++      return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
++}
++
+ static inline bool netif_is_l3_master(const struct net_device *dev)
+ {
+       return dev->priv_flags & IFF_L3MDEV_MASTER;
+diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
+index 3832099289c5..128487658ff7 100644
+--- a/include/net/l3mdev.h
++++ b/include/net/l3mdev.h
+@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 
proto)
+ 
+       if (netif_is_l3_slave(skb->dev))
+               master = netdev_master_upper_dev_get_rcu(skb->dev);
+-      else if (netif_is_l3_master(skb->dev))
++      else if (netif_is_l3_master(skb->dev) ||
++               netif_has_l3_rx_handler(skb->dev))
+               master = skb->dev;
+ 
+       if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 6dd7ff4b337a..d9394fcd0e2c 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -525,12 +525,14 @@ static struct task_struct *find_alive_thread(struct 
task_struct *p)
+       return NULL;
+ }
+ 
+-static struct task_struct *find_child_reaper(struct task_struct *father)
++static struct task_struct *find_child_reaper(struct task_struct *father,
++                                              struct list_head *dead)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
+ {
+       struct pid_namespace *pid_ns = task_active_pid_ns(father);
+       struct task_struct *reaper = pid_ns->child_reaper;
++      struct task_struct *p, *n;
+ 
+       if (likely(reaper != father))
+               return reaper;
+@@ -546,6 +548,12 @@ static struct task_struct *find_child_reaper(struct 
task_struct *father)
+               panic("Attempted to kill init! exitcode=0x%08x\n",
+                       father->signal->group_exit_code ?: father->exit_code);
+       }
++
++      list_for_each_entry_safe(p, n, dead, ptrace_entry) {
++              list_del_init(&p->ptrace_entry);
++              release_task(p);
++      }
++
+       zap_pid_ns_processes(pid_ns);
+       write_lock_irq(&tasklist_lock);
+ 
+@@ -632,7 +640,7 @@ static void forget_original_parent(struct task_struct 
*father,
+               exit_ptrace(father, dead);
+ 
+       /* Can drop and reacquire tasklist_lock */
+-      reaper = find_child_reaper(father);
++      reaper = find_child_reaper(father, dead);
+       if (list_empty(&father->children))
+               return;
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 851efb004857..4f1f5fd12042 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -336,7 +336,8 @@ static void kill_procs(struct list_head *to_kill, int 
forcekill, int trapno,
+                       if (fail || tk->addr_valid == 0) {
+                               pr_err("Memory failure: %#lx: forcibly killing 
%s:%d because of failure to unmap corrupted page\n",
+                                      pfn, tk->tsk->comm, tk->tsk->pid);
+-                              force_sig(SIGKILL, tk->tsk);
++                              do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
++                                               tk->tsk, PIDTYPE_PID);
+                       }
+ 
+                       /*
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 821623fc7091..b08c1a4a1c22 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1044,10 +1044,13 @@ out:
+        * If migration is successful, decrease refcount of the newpage
+        * which will not free the page because new page owner increased
+        * refcounter. As well, if it is LRU page, add the page to LRU
+-       * list in here.
++       * list in here. Use the old state of the isolated source page to
++       * determine if we migrated a LRU page. newpage was already unlocked
++       * and possibly modified by its owner - don't rely on the page
++       * state.
+        */
+       if (rc == MIGRATEPAGE_SUCCESS) {
+-              if (unlikely(__PageMovable(newpage)))
++              if (unlikely(!is_lru))
+                       put_page(newpage);
+               else
+                       putback_lru_page(newpage);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 4a184157cc3d..1de3695cb419 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -861,6 +861,13 @@ static void oom_kill_process(struct oom_control *oc, 
const char *message)
+        * still freeing memory.
+        */
+       read_lock(&tasklist_lock);
++
++      /*
++       * The task 'p' might have already exited before reaching here. The
++       * put_task_struct() will free task_struct 'p' while the loop still try
++       * to access the field of 'p', so, get an extra reference.
++       */
++      get_task_struct(p);
+       for_each_thread(p, t) {
+               list_for_each_entry(child, &t->children, sibling) {
+                       unsigned int child_points;
+@@ -880,6 +887,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
+                       }
+               }
+       }
++      put_task_struct(p);
+       read_unlock(&tasklist_lock);
+ 
+       p = find_lock_task_mm(victim);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 496f8d86b503..c7334d1e392a 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -423,6 +423,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+        * fragment.
+        */
+ 
++      err = -EINVAL;
+       /* Find out where to put this fragment.  */
+       prev_tail = qp->q.fragments_tail;
+       if (!prev_tail)
+@@ -499,7 +500,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 
+ discard_qp:
+       inet_frag_kill(&qp->q);
+-      err = -EINVAL;
+       __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ err:
+       kfree_skb(skb);
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index c81b2c5caf26..8885dbad217b 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -359,6 +359,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+                                       err = -EINVAL;
+                                       goto out_unlock;
+                               }
++                      }
++
++                      if (sk->sk_bound_dev_if) {
+                              dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+                               if (!dev) {
+                                       err = -ENODEV;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index b96dbe38ecad..4ae758bcb2cf 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -83,8 +83,7 @@
+ #define L2TP_SLFLAG_S    0x40000000
+ #define L2TP_SL_SEQ_MASK   0x00ffffff
+ 
+-#define L2TP_HDR_SIZE_SEQ             10
+-#define L2TP_HDR_SIZE_NOSEQ           6
++#define L2TP_HDR_SIZE_MAX             14
+ 
+ /* Default trace flags */
+ #define L2TP_DEFAULT_DEBUG_FLAGS      0
+@@ -796,11 +795,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+                                "%s: recv data ns=%u, session nr=%u\n",
+                                session->name, ns, session->nr);
+               }
++              ptr += 4;
+       }
+ 
+-      /* Advance past L2-specific header, if present */
+-      ptr += session->l2specific_len;
+-
+       if (L2TP_SKB_CB(skb)->has_seq) {
+               /* Received a packet with sequence numbers. If we're the LNS,
+                * check if we sre sending sequence numbers and if not,
+@@ -944,7 +941,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+       __skb_pull(skb, sizeof(struct udphdr));
+ 
+       /* Short packet? */
+-      if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
++      if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
+               l2tp_info(tunnel, L2TP_MSG_DATA,
+                         "%s: recv short packet (len=%d)\n",
+                         tunnel->name, skb->len);
+@@ -1023,6 +1020,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+               goto error;
+       }
+ 
++      if (tunnel->version == L2TP_HDR_VER_3 &&
++          l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto error;
++
+      l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+       l2tp_session_dec_refcount(session);
+ 
+@@ -1122,21 +1123,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+               memcpy(bufp, &session->cookie[0], session->cookie_len);
+               bufp += session->cookie_len;
+       }
+-      if (session->l2specific_len) {
+-              if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+-                      u32 l2h = 0;
+-                      if (session->send_seq) {
+-                              l2h = 0x40000000 | session->ns;
+-                              session->ns++;
+-                              session->ns &= 0xffffff;
+-                              l2tp_dbg(session, L2TP_MSG_SEQ,
+-                                       "%s: updated ns to %u\n",
+-                                       session->name, session->ns);
+-                      }
++      if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
++              u32 l2h = 0;
+ 
+-                      *((__be32 *) bufp) = htonl(l2h);
++              if (session->send_seq) {
++                      l2h = 0x40000000 | session->ns;
++                      session->ns++;
++                      session->ns &= 0xffffff;
++                      l2tp_dbg(session, L2TP_MSG_SEQ,
++                               "%s: updated ns to %u\n",
++                               session->name, session->ns);
+               }
+-              bufp += session->l2specific_len;
++
++              *((__be32 *)bufp) = htonl(l2h);
++              bufp += 4;
+       }
+ 
+       return bufp - optr;
+@@ -1813,7 +1813,7 @@ int l2tp_session_delete(struct l2tp_session *session)
+ EXPORT_SYMBOL_GPL(l2tp_session_delete);
+ 
+ /* We come here whenever a session's send_seq, cookie_len or
+- * l2specific_len parameters are set.
++ * l2specific_type parameters are set.
+  */
+ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+ {
+@@ -1822,7 +1822,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+               if (session->send_seq)
+                       session->hdr_len += 4;
+       } else {
+-              session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
++              session->hdr_len = 4 + session->cookie_len;
++              session->hdr_len += l2tp_get_l2specific_len(session);
+               if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+                       session->hdr_len += 4;
+       }
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 86356a23a0a7..7cc49715606e 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -314,6 +314,37 @@ do {                                                                      \
+ #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
+ #endif
+ 
++static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
++{
++      switch (session->l2specific_type) {
++      case L2TP_L2SPECTYPE_DEFAULT:
++              return 4;
++      case L2TP_L2SPECTYPE_NONE:
++      default:
++              return 0;
++      }
++}
++
++static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
++                                             unsigned char **ptr, unsigned char **optr)
++{
++      int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
++
++      if (opt_len > 0) {
++              int off = *ptr - *optr;
++
++              if (!pskb_may_pull(skb, off + opt_len))
++                      return -1;
++
++              if (skb->data != *optr) {
++                      *optr = skb->data;
++                      *ptr = skb->data + off;
++              }
++      }
++
++      return 0;
++}
++
+#define l2tp_printk(ptr, type, func, fmt, ...)                                \
+ do {                                                                  \
+       if (((ptr)->debug) & (type))                                    \
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 9d77a54e8854..03a696d3bcd9 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -157,6 +157,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+       }
+ 
++      if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto discard_sess;
++
+      l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 247097289fd0..5e6d09863480 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -169,6 +169,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+       }
+ 
++      if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto discard_sess;
++
+       l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
+                        tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 94d05806a9a2..f0ecaec1ff3d 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+ 
+-      mod_timer(&nr->t1timer, jiffies + nr->t1);
++      sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
+ }
+ 
+ void nr_start_t2timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+ 
+-      mod_timer(&nr->t2timer, jiffies + nr->t2);
++      sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
+ }
+ 
+ void nr_start_t4timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+ 
+-      mod_timer(&nr->t4timer, jiffies + nr->t4);
++      sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
+ }
+ 
+ void nr_start_idletimer(struct sock *sk)
+@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
+       struct nr_sock *nr = nr_sk(sk);
+ 
+       if (nr->idle > 0)
+-              mod_timer(&nr->idletimer, jiffies + nr->idle);
++              sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
+ }
+ 
+ void nr_start_heartbeat(struct sock *sk)
+ {
+-      mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
++      sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
+ }
+ 
+ void nr_stop_t1timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t1timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t1timer);
+ }
+ 
+ void nr_stop_t2timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t2timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t2timer);
+ }
+ 
+ void nr_stop_t4timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t4timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t4timer);
+ }
+ 
+ void nr_stop_idletimer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->idletimer);
++      sk_stop_timer(sk, &nr_sk(sk)->idletimer);
+ }
+ 
+ void nr_stop_heartbeat(struct sock *sk)
+ {
+-      del_timer(&sk->sk_timer);
++      sk_stop_timer(sk, &sk->sk_timer);
+ }
+ 
+ int nr_t1timer_running(struct sock *sk)
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index 0fc76d845103..9f704a7f2a28 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)
+ 
+ /*
+  *    Route a frame to an appropriate AX.25 connection.
++ *    A NULL ax25_cb indicates an internally generated frame.
+  */
+ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ {
+@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 
+       if (skb->len < ROSE_MIN_LEN)
+               return res;
++
++      if (!ax25)
++              return rose_loopback_queue(skb, NULL);
++
+       frametype = skb->data[2];
+       lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+       if (frametype == ROSE_CALL_REQUEST &&

Reply via email to