diff --git a/Makefile b/Makefile
index 1608a9b71381..4aabae365a6c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 13
-SUBLEVEL = 13
+SUBLEVEL = 14
 EXTRAVERSION =
 NAME = Fearless Coyote
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a07ef3d6b3ec..2b478224532e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -696,6 +696,7 @@ static int dmatest_func(void *data)
                         * free it this time?" dancing.  For now, just
                         * leave it dangling.
                         */
+                       WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
                        dmaengine_unmap_put(um);
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 80d860cb0746..7a3b201d51df 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -455,6 +455,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
                /* Processor Home Agent */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
+       { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
 
                /* Memory controller */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
@@ -465,7 +466,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 
                /* Optional, mode 2HA */
-       { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
@@ -2260,6 +2260,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 next_imc:
        sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
        if (!sbridge_dev) {
+               /* If the HA1 wasn't found, don't create EDAC second memory controller */
+               if (dev_descr->dom == IMC1 && devno != 1) {
+                       edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
+                                PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+                       pci_dev_put(pdev);
+                       return 0;
+               }
 
                if (dev_descr->dom == SOCK)
                        goto out_imc;
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index f4e8fbec6a94..b5304e264881 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
                return NULL;
        }
 
-       while (buflen > 0) {
+       while (buflen >= sizeof(*union_desc)) {
                union_desc = (struct usb_cdc_union_desc *)buf;
 
+               if (union_desc->bLength > buflen) {
+                       dev_err(&intf->dev, "Too large descriptor\n");
+                       return NULL;
+               }
+
                if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
                    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
                        dev_dbg(&intf->dev, "Found union header\n");
-                       return union_desc;
+
+                       if (union_desc->bLength >= sizeof(*union_desc))
+                               return union_desc;
+
+                       dev_err(&intf->dev,
+                               "Union descriptor too short (%d vs %zd)\n",
+                               union_desc->bLength, sizeof(*union_desc));
+                       return NULL;
                }
 
                buflen -= union_desc->bLength;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7f282e8f4e7f..dc7f952e341f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
 };
 
 struct mlx5e_tc_flow_parse_attr {
+       struct ip_tunnel_info tun_info;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        void *mod_hdr_actions;
+       int mirred_ifindex;
 };
 
 enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);
 
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+                             struct ip_tunnel_info *tun_info,
+                             struct net_device *mirred_dev,
+                             struct net_device **encap_dev,
+                             struct mlx5e_tc_flow *flow);
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
-       struct mlx5_flow_handle *rule;
+       struct net_device *out_dev, *encap_dev = NULL;
+       struct mlx5_flow_handle *rule = NULL;
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5e_priv *out_priv;
        int err;
 
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+               out_dev = __dev_get_by_index(dev_net(priv->netdev),
+                                            attr->parse_attr->mirred_ifindex);
+               err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+                                        out_dev, &encap_dev, flow);
+               if (err) {
+                       rule = ERR_PTR(err);
+                       if (err != -EAGAIN)
+                               goto err_attach_encap;
+               }
+               out_priv = netdev_priv(encap_dev);
+               rpriv = out_priv->ppriv;
+               attr->out_rep = rpriv->rep;
+       }
+
        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err) {
                rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                }
        }
 
-       rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-       if (IS_ERR(rule))
-               goto err_add_rule;
-
+       /* we get here if (1) there's no error (rule being null) or when
+        * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+        */
+       if (rule != ERR_PTR(-EAGAIN)) {
+               rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+               if (IS_ERR(rule))
+                       goto err_add_rule;
+       }
        return rule;
 
 err_add_rule:
@@ -361,6 +391,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 err_add_vlan:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                mlx5e_detach_encap(priv, flow);
+err_attach_encap:
        return rule;
 }
 
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_esw_flow_attr *esw_attr;
        struct mlx5e_tc_flow *flow;
        int err;
 
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
        mlx5e_rep_queue_neigh_stats_work(priv);
 
        list_for_each_entry(flow, &e->flows, encap) {
-               flow->esw_attr->encap_id = e->encap_id;
-               flow->rule = mlx5e_tc_add_fdb_flow(priv,
-                                                  flow->esw_attr->parse_attr,
-                                                  flow);
+               esw_attr = flow->esw_attr;
+               esw_attr->encap_id = e->encap_id;
+               flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
                if (IS_ERR(flow->rule)) {
                        err = PTR_ERR(flow->rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow *flow;
-       struct mlx5_fc *counter;
 
        list_for_each_entry(flow, &e->flows, encap) {
                if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-                       counter = mlx5_flow_rule_counter(flow->rule);
-                       mlx5_del_flow_rules(flow->rule);
-                       mlx5_fc_destroy(priv->mdev, counter);
+                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
                }
        }
 
@@ -1871,7 +1901,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
-                       struct net_device *out_dev, *encap_dev = NULL;
+                       struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;
 
                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1884,17 +1914,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                rpriv = out_priv->ppriv;
                                attr->out_rep = rpriv->rep;
                        } else if (encap) {
-                               err = mlx5e_attach_encap(priv, info,
-                                                        out_dev, &encap_dev, flow);
-                               if (err && err != -EAGAIN)
-                                       return err;
+                               parse_attr->mirred_ifindex = ifindex;
+                               parse_attr->tun_info = *info;
+                               attr->parse_attr = parse_attr;
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
-                               out_priv = netdev_priv(encap_dev);
-                               rpriv = out_priv->ppriv;
-                               attr->out_rep = rpriv->rep;
-                               attr->parse_attr = parse_attr;
+                               /* attr->out_rep is resolved when we handle encap */
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
@@ -1972,7 +1998,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
-                       goto err_handle_encap_flow;
+                       goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -1983,10 +2009,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
-               goto err_free;
+               if (err != -EAGAIN)
+                       goto err_free;
        }
 
-       flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       if (err != -EAGAIN)
+               flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
@@ -2000,16 +2029,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 err_del_rule:
        mlx5e_tc_del_flow(priv, flow);
 
-err_handle_encap_flow:
-       if (err == -EAGAIN) {
-               err = rhashtable_insert_fast(&tc->ht, &flow->node,
-                                            tc->ht_params);
-               if (err)
-                       mlx5e_tc_del_flow(priv, flow);
-               else
-                       return 0;
-       }
-
 err_free:
        kvfree(parse_attr);
        kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 4b6b03d6297f..8ccb68a49c65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
 
-       spin_lock(&health->wq_lock);
+       spin_lock_irqsave(&health->wq_lock, flags);
        set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
-       spin_unlock(&health->wq_lock);
+       spin_unlock_irqrestore(&health->wq_lock, flags);
        cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
 
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2bbda71818ad..e0a7176e1d39 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 
 static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
 {
-#ifdef __BIG_ENDIAN
-       return (vni[0] == tun_id[2]) &&
-              (vni[1] == tun_id[1]) &&
-              (vni[2] == tun_id[0]);
-#else
        return !memcmp(vni, &tun_id[5], 3);
-#endif
 }
 
 static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 22f133ea8d7b..2bdd71c4fcd1 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
 {
        int err;
 
-       err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
-
+       err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
+                             THIS_MODULE);
        if (err)
                goto out1;
 
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 98e4deaa3a6a..5ab1b8849c30 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 91e7b19bbf86..a84691bd75a7 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -204,8 +204,8 @@ static int macvtap_init(void)
 {
        int err;
 
-       err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
-
+       err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
+                             THIS_MODULE);
        if (err)
                goto out1;
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a404552555d4..6722b8354618 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 
 static int ppp_dev_init(struct net_device *dev)
 {
+       struct ppp *ppp;
+
        netdev_lockdep_set_classes(dev);
+
+       ppp = netdev_priv(dev);
+       /* Let the netdevice take a reference on the ppp file. This ensures
+        * that ppp_destroy_interface() won't run before the device gets
+        * unregistered.
+        */
+       atomic_inc(&ppp->file.refcnt);
+
        return 0;
 }
 
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
        wake_up_interruptible(&ppp->file.rwait);
 }
 
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+       struct ppp *ppp;
+
+       ppp = netdev_priv(dev);
+       if (atomic_dec_and_test(&ppp->file.refcnt))
+               ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
        .ndo_uninit      = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
        dev->tx_queue_len = 3;
        dev->type = ARPHRD_PPP;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+       dev->priv_destructor = ppp_dev_priv_destructor;
        netif_keep_dst(dev);
 }
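
The ppp_generic change above pins the ppp structure for as long as the netdevice exists: ndo_init takes an extra reference on ppp->file.refcnt and the new priv_destructor drops it, so ppp_destroy_interface() cannot run before the device is unregistered. A minimal sketch of that shared-ownership pattern in plain C11 atomics (illustrative names only, not the kernel implementation):

    /* Illustrative only: shared ownership via a reference count. */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct ppp_like {
            atomic_int refcnt;              /* stands in for ppp->file.refcnt */
    };

    static void ppp_like_get(struct ppp_like *p)
    {
            /* like the atomic_inc() added to ppp_dev_init() */
            atomic_fetch_add(&p->refcnt, 1);
    }

    static void ppp_like_put(struct ppp_like *p)
    {
            /* like atomic_dec_and_test() in ppp_dev_priv_destructor(): last owner frees */
            if (atomic_fetch_sub(&p->refcnt, 1) == 1)
                    free(p);                /* stands in for ppp_destroy_interface() */
    }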
 
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 3570c7576993..38edfcfd3c3a 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
                                             &tap_proto, 0);
        if (!q)
                goto err;
+       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+               sk_free(&q->sk);
+               goto err;
+       }
 
        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
        if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);
 
-       err = -ENOMEM;
-       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
-               goto err_array;
-
        err = tap_set_queue(tap, file, q);
-       if (err)
-               goto err_queue;
+       if (err) {
+               /* tap_sock_destruct() will take care of freeing skb_array */
+               goto err_put;
+       }
 
        dev_put(tap->dev);
 
        rtnl_unlock();
        return err;
 
-err_queue:
-       skb_array_cleanup(&q->skb_array);
-err_array:
+err_put:
        sock_put(&q->sk);
 err:
        if (tap)
@@ -1035,6 +1035,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETSNDBUF:
                if (get_user(s, sp))
                        return -EFAULT;
+               if (s <= 0)
+                       return -EINVAL;
 
                q->sk.sk_sndbuf = s;
                return 0;
@@ -1252,8 +1254,8 @@ static int tap_list_add(dev_t major, const char *device_name)
        return 0;
 }
 
-int tap_create_cdev(struct cdev *tap_cdev,
-                   dev_t *tap_major, const char *device_name)
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+                   const char *device_name, struct module *module)
 {
        int err;
 
@@ -1262,6 +1264,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
                goto out1;
 
        cdev_init(tap_cdev, &tap_fops);
+       tap_cdev->owner = module;
        err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
        if (err)
                goto out2;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cb1f7747adad..d1cb1ff83251 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1813,6 +1813,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (!dev)
                        return -ENOMEM;
+               err = dev_get_valid_name(net, dev, name);
+               if (err < 0)
+                       goto err_free_dev;
 
                dev_net_set(dev, net);
                dev->rtnl_link_ops = &tun_link_ops;
@@ -2216,6 +2219,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        ret = -EFAULT;
                        break;
                }
+               if (sndbuf <= 0) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                tun->sndbuf = sndbuf;
                tun_set_sndbuf(tun);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..cf4f5fff3e50 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
                        return tmp;
        }
 
-       if (in) {
+       if (in)
                dev->in_pipe = usb_rcvbulkpipe(udev,
                        in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+       if (out)
                dev->out_pipe = usb_sndbulkpipe(udev,
                        out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-       }
+
        if (iso_in) {
                dev->iso_in = &iso_in->desc;
                dev->in_iso_pipe = usb_rcvisocpipe(udev,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 4837157da0dc..9ae41cdd0d4c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
 int tap_get_minor(dev_t major, struct tap_dev *tap);
 void tap_free_minor(dev_t major, struct tap_dev *tap);
 int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
-                   dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+                   const char *device_name, struct module *module);
 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
 
 #endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c99ba7914c0a..a6d3c8b6cb93 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3702,6 +3702,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                    unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name);
+
 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053dfc78..db8162dd8c0b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -96,7 +96,7 @@ struct inet_request_sock {
        kmemcheck_bitfield_end(flags);
        u32                     ir_mark;
        union {
-               struct ip_options_rcu   *opt;
+               struct ip_options_rcu __rcu     *ireq_opt;
 #if IS_ENABLED(CONFIG_IPV6)
                struct {
                        struct ipv6_txoptions   *ipv6_opt;
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+       return rcu_dereference_check(ireq->ireq_opt,
+                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 48978125947b..150c2c66897a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1750,12 +1750,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
 }
 
-/* Called when old skb is about to be deleted (to be combined with new skb) */
-static inline void tcp_highest_sack_combine(struct sock *sk,
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
                                            struct sk_buff *old,
                                            struct sk_buff *new)
 {
-       if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+       if (old == tcp_highest_sack(sk))
                tcp_sk(sk)->highest_sack = new;
 }
 
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3bc890716c89..de2152730809 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
                }
                *vinfo_last = NULL;
 
-               return 0;
+               return err;
        }
 
        return br_vlan_info(br, p, cmd, vinfo_curr);
diff --git a/net/core/dev.c b/net/core/dev.c
index 6fa30a4c60ef..4f9ec923d21b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1146,9 +1146,8 @@ static int dev_alloc_name_ns(struct net *net,
        return ret;
 }
 
-static int dev_get_valid_name(struct net *net,
-                             struct net_device *dev,
-                             const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name)
 {
        BUG_ON(!net);
 
@@ -1164,6 +1163,7 @@ static int dev_get_valid_name(struct net *net,
 
        return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);
 
 /**
  *     dev_change_name - change name of a device
diff --git a/net/core/sock.c b/net/core/sock.c
index 0967da925022..e48424ddbc6b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1674,6 +1674,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
                sock_reset_flag(newsk, SOCK_DONE);
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
                filter = rcu_dereference(sk->sk_filter);
@@ -1706,8 +1707,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                atomic64_set(&newsk->sk_cookie, 0);
 
                mem_cgroup_sk_alloc(newsk);
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);
-       WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-                                           lockdep_is_held(&reuseport_lock)),
-                 "multiple allocations for the same socket");
+
+       /* Allocation attempts can occur concurrently via the setsockopt path
+        * and the bind/hash path.  Nothing to do when we lose the race.
+        */
+       if (rcu_dereference_protected(sk->sk_reuseport_cb,
+                                     lockdep_is_held(&reuseport_lock)))
+               goto out;
+
        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
        reuse->num_socks = 1;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
        spin_unlock_bh(&reuseport_lock);
 
        return 0;
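
The sock_reuseport change above replaces the WARN_ONCE() with an early return, making reuseport_alloc() idempotent when the setsockopt path and the bind/hash path race to allocate sk->sk_reuseport_cb. A minimal userspace sketch of the same check-under-lock pattern (pthreads, hypothetical names, not the kernel code):

    /* Illustrative only: race-tolerant, idempotent allocation under a lock. */
    #include <pthread.h>
    #include <stdlib.h>

    struct reuse_cb { int num_socks; };     /* stands in for sk->sk_reuseport_cb */

    static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct reuse_cb *cb;

    static int reuse_cb_alloc(void)
    {
            int err = 0;

            pthread_mutex_lock(&cb_lock);
            if (cb)                         /* lost the race: the other path already allocated */
                    goto out;
            cb = calloc(1, sizeof(*cb));
            if (!cb)
                    err = -1;
            else
                    cb->num_socks = 1;
    out:
            pthread_mutex_unlock(&cb_lock);
            return err;
    }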
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1b202f16531f..bc70d96d762a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr     = ireq->ir_loc_addr;
-       newinet->inet_opt       = ireq->opt;
-       ireq->opt          = NULL;
+       RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
        newinet->mc_index  = inet_iif(skb);
        newinet->mc_ttl    = ip_hdr(skb)->ttl;
        newinet->inet_id   = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+       if (*own_req)
+               ireq->ireq_opt = NULL;
+       else
+               newinet->inet_opt = NULL;
        return newsk;
 
 exit_overflow:
@@ -441,6 +443,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        dccp_done(newsk);
        goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
                                                              ireq->ir_rmt_addr);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           ireq_opt_deref(ireq));
                err = net_xmit_eval(err);
        }
 
@@ -548,7 +551,7 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
 {
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 20bc9c56fca0..278b48d70bd9 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
                if (!ethernet)
                        return -EINVAL;
                ethernet_dev = of_find_net_device_by_node(ethernet);
+               if (!ethernet_dev)
+                       return -EPROBE_DEFER;
        } else {
                ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+               if (!ethernet_dev)
+                       return -EPROBE_DEFER;
                dev_put(ethernet_dev);
        }
 
-       if (!ethernet_dev)
-               return -EPROBE_DEFER;
-
        if (!dst->cpu_dp) {
                dst->cpu_dp = port;
                dst->cpu_dp->netdev = ethernet_dev;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2ae8f54cb321..82178cc69c96 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
        buf = NULL;
 
        req_inet = inet_rsk(req);
-       opt = xchg(&req_inet->opt, opt);
+       opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
        if (opt)
                kfree_rcu(opt, rcu);
 
@@ -1973,11 +1973,13 @@ int cipso_v4_req_setattr(struct request_sock *req,
  * values on failure.
  *
  */
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
 {
+       struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
        int hdr_delta = 0;
-       struct ip_options_rcu *opt = *opt_ptr;
 
+       if (!opt || opt->opt.cipso == 0)
+               return 0;
        if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
                u8 cipso_len;
                u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
  */
 void cipso_v4_sock_delattr(struct sock *sk)
 {
-       int hdr_delta;
-       struct ip_options_rcu *opt;
        struct inet_sock *sk_inet;
+       int hdr_delta;
 
        sk_inet = inet_sk(sk);
-       opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-       if (!opt || opt->opt.cipso == 0)
-               return;
 
        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
        if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
  */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-       struct ip_options_rcu *opt;
-       struct inet_request_sock *req_inet;
-
-       req_inet = inet_rsk(req);
-       opt = req_inet->opt;
-       if (!opt || opt->opt.cipso == 0)
-               return;
-
-       cipso_v4_delopt(&req_inet->opt);
+       cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
 }
 
 /**
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index d5cac99170b1..8c72034df28e 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -98,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                greh = (struct gre_base_hdr *)skb_transport_header(skb);
                pcsum = (__sum16 *)(greh + 1);
 
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        unsigned int partial_adj;
 
                        /* Adjust checksum to account for the fact that
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4089c013cb03..4438990cf65e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -537,9 +537,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
-       struct ip_options_rcu *opt = ireq->opt;
+       struct ip_options_rcu *opt;
        struct rtable *rt;
 
+       opt = ireq_opt_deref(ireq);
+
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -573,10 +575,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
        struct flowi4 *fl4;
        struct rtable *rt;
 
+       opt = rcu_dereference(ireq->ireq_opt);
        fl4 = &newinet->cork.fl.u.ip4;
 
-       rcu_read_lock();
-       opt = rcu_dereference(newinet->inet_opt);
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -589,13 +590,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
-       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
-       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 2e3389d614d1..3eeecee0b21f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -449,10 +449,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                        return reuseport_add_sock(sk, sk2);
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index fb1ad22b5e29..cdd627355ed1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
 
 static int ipip_err(struct sk_buff *skb, u32 info)
 {
-
-/* All the routers (except for Linux) return only
-   8 bytes of packet payload. It means, that precise relaying of
-   ICMP in the real Internet is absolutely infeasible.
- */
+       /* All the routers (except for Linux) return only
+        * 8 bytes of packet payload. It means, that precise relaying of
+        * ICMP in the real Internet is absolutely infeasible.
+        */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
-       struct ip_tunnel *t;
-       int err;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
+       struct ip_tunnel *t;
+       int err = 0;
+
+       switch (type) {
+       case ICMP_DEST_UNREACH:
+               switch (code) {
+               case ICMP_SR_FAILED:
+                       /* Impossible event. */
+                       goto out;
+               default:
+                       /* All others are translated to HOST_UNREACH.
+                        * rfc2003 contains "deep thoughts" about NET_UNREACH,
+                        * I believe they are just ether pollution. --ANK
+                        */
+                       break;
+               }
+               break;
+
+       case ICMP_TIME_EXCEEDED:
+               if (code != ICMP_EXC_TTL)
+                       goto out;
+               break;
+
+       case ICMP_REDIRECT:
+               break;
+
+       default:
+               goto out;
+       }
 
-       err = -ENOENT;
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                             iph->daddr, iph->saddr, 0);
-       if (!t)
+       if (!t) {
+               err = -ENOENT;
                goto out;
+       }
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, iph->protocol, 0);
-               err = 0;
+               ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+                                iph->protocol, 0);
                goto out;
        }
 
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-                             iph->protocol, 0);
-               err = 0;
+               ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
                goto out;
        }
 
-       if (t->parms.iph.daddr == 0)
+       if (t->parms.iph.daddr == 0) {
+               err = -ENOENT;
                goto out;
+       }
 
-       err = 0;
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                goto out;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c5aa25be7108..72afa4cfb022 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2507,7 +2507,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
        struct rtable *ort = (struct rtable *) dst_orig;
        struct rtable *rt;
 
-       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
        if (rt) {
                struct dst_entry *new = &rt->dst;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 03ad8778c395..0f914fda5bf3 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        /* We throwed the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
         */
-       ireq->opt = tcp_v4_save_options(skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
 
        if (security_inet_conn_request(sk, skb, req)) {
                reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bab7f0493098..e92e5dbcb3d6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6235,7 +6235,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
                struct inet_request_sock *ireq = inet_rsk(req);
 
                kmemcheck_annotate_bitfield(ireq, flags);
-               ireq->opt = NULL;
+               ireq->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
                ireq->pktopts = NULL;
 #endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b1441bc8192f..78835f681538 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -878,7 +878,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           ireq_opt_deref(ireq));
                err = net_xmit_eval(err);
        }
 
@@ -890,7 +890,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1269,7 +1269,7 @@ static void tcp_v4_init_req(struct request_sock *req,
 
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-       ireq->opt = tcp_v4_save_options(skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1356,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newsk->sk_bound_dev_if = ireq->ir_iif;
-       newinet->inet_saddr           = ireq->ir_loc_addr;
-       inet_opt              = ireq->opt;
-       rcu_assign_pointer(newinet->inet_opt, inet_opt);
-       ireq->opt             = NULL;
+       newinet->inet_saddr   = ireq->ir_loc_addr;
+       inet_opt              = rcu_dereference(ireq->ireq_opt);
+       RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
@@ -1404,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-       if (*own_req)
+       if (likely(*own_req)) {
                tcp_move_syn(newtp, req);
-
+               ireq->ireq_opt = NULL;
+       } else {
+               newinet->inet_opt = NULL;
+       }
        return newsk;
 
 exit_overflow:
@@ -1417,6 +1419,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        tcp_listendrop(sk);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 40f7c8ee9ba6..58587b0e2b5d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2094,6 +2094,7 @@ static int tcp_mtu_probe(struct sock *sk)
        nskb->ip_summed = skb->ip_summed;
 
        tcp_insert_write_queue_before(nskb, skb, sk);
+       tcp_highest_sack_replace(sk, skb, nskb);
 
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2271,6 +2272,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
        sent_pkts = 0;
 
+       tcp_mstamp_refresh(tp);
        if (!push_one) {
                /* Do MTU probing. */
                result = tcp_mtu_probe(sk);
@@ -2282,7 +2284,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        }
 
        max_segs = tcp_tso_segs(sk, mss_now);
-       tcp_mstamp_refresh(tp);
        while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
 
@@ -2694,7 +2695,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
                else if (!skb_shift(skb, next_skb, next_skb_size))
                        return false;
        }
-       tcp_highest_sack_combine(sk, next_skb, skb);
+       tcp_highest_sack_replace(sk, next_skb, skb);
 
        tcp_unlink_write_queue(next_skb, sk);
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 84861d71face..f9e1bcfb6e2d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
                }
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 /**
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0932c85b42af..6401574cd638 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -122,7 +122,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                 * will be using a length value equal to only one MSS sized
                 * segment instead of the entire frame.
                 */
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        uh->len = htons(skb_shinfo(skb)->gso_size +
                                        SKB_GSO_CB(skb)->data_offset +
                                        skb->head - (unsigned char *)uh);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ba757c28a301..bdc93e51427d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3367,6 +3367,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
                if ((ifp->flags & IFA_F_PERMANENT) &&
                    fixup_permanent_addr(idev, ifp) < 0) {
                        write_unlock_bh(&idev->lock);
+                       in6_ifa_hold(ifp);
                        ipv6_del_addr(ifp);
                        write_lock_bh(&idev->lock);
 
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 8081bafe441b..15535ee327c5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
+       opt_space->tot_len = fopt->tot_len;
        return opt_space;
 }
 EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 1602b491b281..59c121b932ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        case ICMPV6_DEST_UNREACH:
                net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
                                    t->parms.name);
-               break;
+               if (code != ICMPV6_PORT_UNREACH)
+                       break;
+               return;
        case ICMPV6_TIME_EXCEED:
                if (code == ICMPV6_EXC_HOPLIMIT) {
                        net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
                                            t->parms.name);
+                       break;
                }
-               break;
+               return;
        case ICMPV6_PARAMPROB:
                teli = 0;
                if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
                                            t->parms.name);
                }
-               break;
+               return;
        case ICMPV6_PKT_TOOBIG:
                mtu = be32_to_cpu(info) - offset - t->tun_hlen;
                if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
-               break;
+               return;
        }
 
        if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                               __u32 *pmtu, __be16 proto)
 {
        struct ip6_tnl *tunnel = netdev_priv(dev);
-       __be16 protocol = (dev->type == ARPHRD_ETHER) ?
-                         htons(ETH_P_TEB) : proto;
+       struct dst_entry *dst = skb_dst(skb);
+       __be16 protocol;
 
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                tunnel->o_seqno++;
 
        /* Push GRE header. */
+       protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
        gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
                         protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
+       /* TooBig packet may have updated dst->dev's mtu */
+       if (dst && dst_mtu(dst) > dst->dev->mtu)
+               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
        return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
                            NEXTHDR_GRE);
 }
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cdb3728faca7..4a87f9428ca5 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-               if (gso_partial)
+               if (gso_partial && skb_is_gso(skb))
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2dfe50d8d609..3ce9ab29bd37 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1224,11 +1224,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                if (WARN_ON(v6_cork->opt))
                        return -EINVAL;
 
-               v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+               v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
                if (unlikely(!v6_cork->opt))
                        return -ENOBUFS;
 
-               v6_cork->opt->tot_len = opt->tot_len;
+               v6_cork->opt->tot_len = sizeof(*opt);
                v6_cork->opt->opt_flen = opt->opt_flen;
                v6_cork->opt->opt_nflen = opt->opt_nflen;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2d0e7798c793..44eebe738c09 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1251,7 +1251,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
        struct dst_entry *new = NULL;
 
        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
-                      DST_OBSOLETE_NONE, 0);
+                      DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f0edb7209079..412c513d69b3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -584,6 +584,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        u32 tunnel_id, peer_tunnel_id;
        u32 session_id, peer_session_id;
        bool drop_refcnt = false;
+       bool drop_tunnel = false;
        int ver = 2;
        int fd;
 
@@ -652,7 +653,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        if (tunnel_id == 0)
                goto end;
 
-       tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
+       tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
+       if (tunnel)
+               drop_tunnel = true;
 
        /* Special case: create tunnel context if session_id and
         * peer_session_id is 0. Otherwise look up tunnel using supplied
@@ -781,6 +784,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 end:
        if (drop_refcnt)
                l2tp_session_dec_refcount(session);
+       if (drop_tunnel)
+               l2tp_tunnel_dec_refcount(tunnel);
        release_sock(sk);
 
        return error;
@@ -993,6 +998,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
                 session->name, cmd, arg);
 
        sk = ps->sock;
+       if (!sk)
+               return -EBADR;
+
        sock_hold(sk);
 
        switch (cmd) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a98fc2b5e0dc..938049395f90 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jb...@suse.cz>
  * Copyright 2007-2008 Johannes Berg <johan...@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015      Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <net/mac80211.h>
+#include <crypto/algapi.h>
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
        ieee80211_key_free_common(key);
 }
 
+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_key *old,
+                                   struct ieee80211_key *new)
+{
+       u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+       u8 *tk_old, *tk_new;
+
+       if (!old || new->conf.keylen != old->conf.keylen)
+               return false;
+
+       tk_old = old->conf.key;
+       tk_new = new->conf.key;
+
+       /*
+        * In station mode, don't compare the TX MIC key, as it's never used
+        * and offloaded rekeying may not care to send it to the host. This
+        * is the case in iwlwifi, for example.
+        */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+           new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+           !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+               memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+               memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+               memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+               memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+               tk_old = tkip_old;
+               tk_new = tkip_new;
+       }
+
+       return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
 int ieee80211_key_link(struct ieee80211_key *key,
                       struct ieee80211_sub_if_data *sdata,
                       struct sta_info *sta)
@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
        pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
        idx = key->conf.keyidx;
-       key->local = sdata->local;
-       key->sdata = sdata;
-       key->sta = sta;
 
        mutex_lock(&sdata->local->key_mtx);
 
@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
        else
                old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
+       /*
+        * Silently accept key re-installation without really installing the
+        * new version of the key to avoid nonce reuse or replay issues.
+        */
+       if (ieee80211_key_identical(sdata, old_key, key)) {
+               ieee80211_key_free_unused(key);
+               ret = 0;
+               goto out;
+       }
+
+       key->local = sdata->local;
+       key->sdata = sdata;
+       key->sta = sta;
+
        increment_tailroom_need_count(sdata);
 
        ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
                ret = 0;
        }
 
+ out:
        mutex_unlock(&sdata->local->key_mtx);
 
        return ret;
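
The mac80211 change above refuses to reinstall an identical key (the key-reinstallation mitigation) and compares key material with crypto_memneq() so the check does not leak timing information. A standalone sketch of a constant-time comparison in the same spirit (illustrative only, not the kernel's crypto_memneq() implementation):

    /* Illustrative only: constant-time inequality test. */
    #include <stddef.h>

    static int ct_memneq(const void *a, const void *b, size_t len)
    {
            const unsigned char *pa = a, *pb = b;
            unsigned char diff = 0;
            size_t i;

            /* Always touch every byte so timing does not reveal how much matched. */
            for (i = 0; i < len; i++)
                    diff |= pa[i] ^ pb[i];

            return diff != 0;
    }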
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7e794ad50cb0..09c8dbbd2d70 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2258,16 +2258,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        cb->min_dump_alloc = control->min_dump_alloc;
        cb->skb = skb;
 
+       if (cb->start) {
+               ret = cb->start(cb);
+               if (ret)
+                       goto error_unlock;
+       }
+
        nlk->cb_running = true;
 
        mutex_unlock(nlk->cb_mutex);
 
-       ret = 0;
-       if (cb->start)
-               ret = cb->start(cb);
-
-       if (!ret)
-               ret = netlink_dump(sk);
+       ret = netlink_dump(sk);
 
        sock_put(sk);
 
@@ -2298,6 +2299,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        size_t tlvlen = 0;
        struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
        unsigned int flags = 0;
+       bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
 
        /* Error messages get the original request appened, unless the user
         * requests to cap the error message, and get extra error data if
@@ -2308,7 +2310,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                        payload += nlmsg_len(nlh);
                else
                        flags |= NLM_F_CAPPED;
-               if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+               if (nlk_has_extack && extack) {
                        if (extack->_msg)
                                tlvlen += nla_total_size(strlen(extack->_msg) + 1);
                        if (extack->bad_attr)
@@ -2317,8 +2319,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        } else {
                flags |= NLM_F_CAPPED;
 
-               if (nlk->flags & NETLINK_F_EXT_ACK &&
-                   extack && extack->cookie_len)
+               if (nlk_has_extack && extack && extack->cookie_len)
                        tlvlen += nla_total_size(extack->cookie_len);
        }
 
@@ -2346,7 +2347,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
 
-       if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+       if (nlk_has_extack && extack) {
                if (err) {
                        if (extack->_msg)
                                WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
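
The __netlink_dump_start() change above runs cb->start() before nlk->cb_running is set, so a failing start hook leaves the socket untouched instead of failing only after the dump state was already published. A stripped-down sketch of that ordering, using hypothetical types in place of the real netlink structures and with all locking omitted:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct netlink_callback and the socket state. */
struct dump_cb {
	int (*start)(struct dump_cb *cb);
};

struct dump_sock {
	struct dump_cb cb;
	bool cb_running;
};

/*
 * Mirrors the reordered flow: run the optional ->start() hook first and only
 * publish cb_running = true once it has succeeded.  On failure nothing has
 * been published, so no unwinding is needed.
 */
static int dump_start(struct dump_sock *nlk, const struct dump_cb *control)
{
	int ret;

	nlk->cb = *control;

	if (nlk->cb.start) {
		ret = nlk->cb.start(&nlk->cb);
		if (ret)
			return ret;
	}

	nlk->cb_running = true;
	return 0;
}

static int failing_start(struct dump_cb *cb)
{
	(void)cb;
	return -1;
}

int main(void)
{
	struct dump_sock sk = { .cb_running = false };
	struct dump_cb control = { .start = failing_start };
	int ret = dump_start(&sk, &control);

	printf("ret=%d running=%d\n", ret, sk.cb_running);
	return 0;
}
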
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 29d7b7e5b128..b0c8fee3d53d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1771,7 +1771,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 
 out:
        if (err && rollover) {
-               kfree(rollover);
+               kfree_rcu(rollover, rcu);
                po->rollover = NULL;
        }
        mutex_unlock(&fanout_mutex);
@@ -1798,8 +1798,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                else
                        f = NULL;
 
-               if (po->rollover)
+               if (po->rollover) {
                        kfree_rcu(po->rollover, rcu);
+                       po->rollover = NULL;
+               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -3853,6 +3855,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
+       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3931,13 +3934,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               if (!po->rollover)
+               rcu_read_lock();
+               rollover = rcu_dereference(po->rollover);
+               if (rollover) {
+                       rstats.tp_all = atomic_long_read(&rollover->num);
+                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+                       data = &rstats;
+                       lv = sizeof(rstats);
+               }
+               rcu_read_unlock();
+               if (!rollover)
                        return -EINVAL;
-               rstats.tp_all = atomic_long_read(&po->rollover->num);
-               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
-               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
-               data = &rstats;
-               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
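
The PACKET_ROLLOVER_STATS change above dereferences po->rollover exactly once, under rcu_read_lock(), and copies the counters out before deciding whether to return -EINVAL. The sketch below shows only that single-snapshot aspect of the pattern, using C11 atomics; it does not model RCU itself, which in the kernel is what additionally keeps the object alive while the counters are read.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, much-simplified stand-ins for struct packet_rollover and
 * struct tpacket_rollover_stats. */
struct rollover {
	atomic_long num;
	atomic_long num_huge;
	atomic_long num_failed;
};

struct rollover_stats {
	long tp_all, tp_huge, tp_failed;
};

/*
 * Read the pointer exactly once and work from that snapshot, as the patch
 * does with rcu_dereference(), instead of re-reading a pointer that another
 * thread may clear in between.
 */
static int read_rollover_stats(struct rollover *_Atomic *slot,
			       struct rollover_stats *out)
{
	struct rollover *ro = atomic_load(slot);

	if (!ro)
		return -1;	/* mirrors the -EINVAL case */

	out->tp_all = atomic_load(&ro->num);
	out->tp_huge = atomic_load(&ro->num_huge);
	out->tp_failed = atomic_load(&ro->num_failed);
	return 0;
}

int main(void)
{
	struct rollover ro = { 0 };
	struct rollover *_Atomic slot = &ro;
	struct rollover_stats st;

	atomic_fetch_add(&ro.num, 3);

	if (!read_rollover_stats(&slot, &st))
		printf("all=%ld huge=%ld failed=%ld\n",
		       st.tp_all, st.tp_huge, st.tp_failed);
	return 0;
}
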
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4fb5a3222d0d..7935db0d787c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -307,6 +307,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        struct Qdisc *q;
 
+       if (!handle)
+               return NULL;
        q = qdisc_match_from_root(dev->qdisc, handle);
        if (q)
                goto out;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 41eb2ec10460..1678d9ea7740 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 {
        struct dst_entry *dst;
 
-       if (!t)
+       if (sock_owned_by_user(sk) || !t)
                return;
        dst = sctp_transport_dst_check(t);
        if (dst)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a4b6ffb61495..1344e3a411ae 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
-                       if (!dev ||
-                           !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
+                       if (!dev || !(opt->inet.freebind ||
+                                     net->ipv6.sysctl.ip_nonlocal_bind ||
+                                     ipv6_chk_addr(net, &addr->v6.sin6_addr,
+                                                   dev, 0))) {
                                rcu_read_unlock();
                                return 0;
                        }
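
Condensing the sctp_inet6_bind_verify() condition above: a scoped IPv6 bind is accepted when the interface exists and either the address is configured on it or one of the non-local-bind switches (the socket's freebind flag or the ip_nonlocal_bind sysctl) is enabled. A tiny sketch of that boolean, with plain flags standing in for the socket and sysctl state:

#include <stdbool.h>
#include <stdio.h>

/* Accept the bind when the device exists and the address is either local to
 * it or non-local binds are explicitly allowed. */
static bool bind_ok(bool dev_found, bool addr_on_dev,
		    bool freebind, bool nonlocal_bind)
{
	return dev_found && (freebind || nonlocal_bind || addr_on_dev);
}

int main(void)
{
	printf("%d\n", bind_ok(true, false, true, false));   /* freebind allows it */
	printf("%d\n", bind_ok(true, false, false, false));  /* rejected as before */
	return 0;
}
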
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8d760863bc41..3d79085eb4e0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -169,6 +169,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
        sk_mem_charge(sk, chunk->skb->truesize);
 }
 
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+       skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+                                      void (*cb)(struct sctp_chunk *))
+
+{
+       struct sctp_outq *q = &asoc->outqueue;
+       struct sctp_transport *t;
+       struct sctp_chunk *chunk;
+
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+                       cb(chunk);
+
+       list_for_each_entry(chunk, &q->retransmit, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->sacked, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->abandoned, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->out_chunk_list, list)
+               cb(chunk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
                                   int len)
@@ -8196,7 +8226,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * paths won't try to lock it and then oldsk.
         */
        lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+       sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
        sctp_assoc_migrate(assoc, newsk);
+       sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
 
        /* If the association on the newsk is already closed before accept()
         * is called, set RCV_SHUTDOWN flag.
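
The sctp_sock_migrate() hunk wraps the association move with the two new helpers: every queued outgoing chunk is orphaned (uncharged from the old socket) before the migration and charged to the new owner afterwards, so send-buffer accounting follows the socket. A much-simplified sketch of that uncharge/recharge idea, with trivial structs standing in for sk, skb and sctp_chunk:

#include <stdio.h>

#define NCHUNKS 3

/* Hypothetical stand-ins for the socket, its write-memory counter and a
 * queued data chunk. */
struct msock { long wmem; };

struct chunk {
	struct msock *owner;
	long truesize;
};

/* Drop the old socket's write-memory charge, like sctp_clear_owner_w(). */
static void clear_owner_w(struct chunk *c)
{
	if (c->owner) {
		c->owner->wmem -= c->truesize;
		c->owner = NULL;
	}
}

/* Charge the chunk to a socket, like sctp_set_owner_w(). */
static void set_owner_w(struct chunk *c, struct msock *sk)
{
	c->owner = sk;
	sk->wmem += c->truesize;
}

int main(void)
{
	struct msock oldsk = { 0 }, newsk = { 0 };
	struct chunk q[NCHUNKS] = { { 0 } };
	int i;

	for (i = 0; i < NCHUNKS; i++) {
		q[i].truesize = 100;
		set_owner_w(&q[i], &oldsk);
	}

	/* Migration: uncharge everything from oldsk, then recharge to newsk,
	 * mirroring the clear/set calls wrapped around sctp_assoc_migrate(). */
	for (i = 0; i < NCHUNKS; i++)
		clear_owner_w(&q[i]);
	for (i = 0; i < NCHUNKS; i++)
		set_owner_w(&q[i], &newsk);

	printf("old wmem=%ld new wmem=%ld\n", oldsk.wmem, newsk.wmem);
	return 0;
}
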
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
        err = -ENOENT;
        if (sk == NULL)
                goto out_nosk;
+       if (!net_eq(sock_net(sk), net))
+               goto out;
 
        err = sock_diag_check_cookie(sk, req->udiag_cookie);
        if (err)
