commit:     a0d0349612049072e45ad7d28ff2422914b4b2dd
Author:     Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Mon Feb 12 09:01:01 2018 +0000
Commit:     Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Mon Feb 12 09:01:01 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a0d03496

Linux kernel 4.15.3

 0000_README             |   4 +
 1002_linux-4.15.3.patch | 776 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 780 insertions(+)

diff --git a/0000_README b/0000_README
index db575f6..635b977 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.15.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.15.2
 
+Patch:  1002_linux-4.15.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.15.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.15.3.patch b/1002_linux-4.15.3.patch
new file mode 100644
index 0000000..7d0d7a2
--- /dev/null
+++ b/1002_linux-4.15.3.patch
@@ -0,0 +1,776 @@
+diff --git a/Makefile b/Makefile
+index 54f1bc10b531..13566ad7863a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index 9267cbdb14d2..3ced1ba1fd11 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -198,11 +198,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+       }
+ 
+       sg_init_table(sg, np + 1);
+-      np--;
++      if (rem)
++              np--;
+       for (k = 0; k < np; k++)
+               sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
+ 
+-      sg_set_buf(&sg[k + 1], xbuf[k], rem);
++      if (rem)
++              sg_set_buf(&sg[k + 1], xbuf[k], rem);
+ }
+ 
+ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
+diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
+index 016d7427ebfa..761d8279abca 100644
+--- a/drivers/gpio/gpio-uniphier.c
++++ b/drivers/gpio/gpio-uniphier.c
+@@ -505,4 +505,4 @@ module_platform_driver(uniphier_gpio_driver);
+ 
+ MODULE_AUTHOR("Masahiro Yamada <yamada.masah...@socionext.com>");
+ MODULE_DESCRIPTION("UniPhier GPIO driver");
+-MODULE_LICENSE("GPL");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+index 46768c056193..0c28d0b995cc 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+@@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+       return ctx;
+ }
+ EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Mediatek video codec driver");
+diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
+index 270ec613c27c..6164102e6f9f 100644
+--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
++++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
+@@ -420,3 +420,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
+       mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
+ }
+ EXPORT_SYMBOL(soc_camera_calc_client_output);
++
++MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
++MODULE_AUTHOR("Guennadi Liakhovetski <ker...@pengutronix.de>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
+index 807c94c70049..92f93a880015 100644
+--- a/drivers/media/platform/tegra-cec/tegra_cec.c
++++ b/drivers/media/platform/tegra-cec/tegra_cec.c
+@@ -493,3 +493,8 @@ static struct platform_driver tegra_cec_driver = {
+ };
+ 
+ module_platform_driver(tegra_cec_driver);
++
++MODULE_DESCRIPTION("Tegra HDMI CEC driver");
++MODULE_AUTHOR("NVIDIA CORPORATION");
++MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index f7080d0ab874..46b0372dd032 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+       struct list_head *head = &mbx->cmd_q;
+       struct qlcnic_cmd_args *cmd = NULL;
+ 
+-      spin_lock(&mbx->queue_lock);
++      spin_lock_bh(&mbx->queue_lock);
+ 
+       while (!list_empty(head)) {
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+               qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+       }
+ 
+-      spin_unlock(&mbx->queue_lock);
++      spin_unlock_bh(&mbx->queue_lock);
+ }
+ 
+ static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ {
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ 
+-      spin_lock(&mbx->queue_lock);
++      spin_lock_bh(&mbx->queue_lock);
+ 
+       list_del(&cmd->list);
+       mbx->num_cmds--;
+ 
+-      spin_unlock(&mbx->queue_lock);
++      spin_unlock_bh(&mbx->queue_lock);
+ 
+       qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+               init_completion(&cmd->completion);
+               cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+ 
+-              spin_lock(&mbx->queue_lock);
++              spin_lock_bh(&mbx->queue_lock);
+ 
+               list_add_tail(&cmd->list, &mbx->cmd_q);
+               mbx->num_cmds++;
+@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+               *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+               queue_work(mbx->work_q, &mbx->work);
+ 
+-              spin_unlock(&mbx->queue_lock);
++              spin_unlock_bh(&mbx->queue_lock);
+ 
+               return 0;
+       }
+@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+               mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+               spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ 
+-              spin_lock(&mbx->queue_lock);
++              spin_lock_bh(&mbx->queue_lock);
+ 
+               if (list_empty(head)) {
+-                      spin_unlock(&mbx->queue_lock);
++                      spin_unlock_bh(&mbx->queue_lock);
+                       return;
+               }
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ 
+-              spin_unlock(&mbx->queue_lock);
++              spin_unlock_bh(&mbx->queue_lock);
+ 
+               mbx_ops->encode_cmd(adapter, cmd);
+               mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 734286ebe5ef..dd713dff8d22 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
+ {
+       void __iomem *ioaddr = tp->mmio_addr;
+ 
+-      return RTL_R8(IBISR0) & 0x02;
++      return RTL_R8(IBISR0) & 0x20;
+ }
+ 
+ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+       void __iomem *ioaddr = tp->mmio_addr;
+ 
+       RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
+-      rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
++      rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+       RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
+       RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+ }
+diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
+index fc8f8bdf6579..056cb6093630 100644
+--- a/drivers/net/ethernet/rocker/rocker_main.c
++++ b/drivers/net/ethernet/rocker/rocker_main.c
+@@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+               goto err_alloc_ordered_workqueue;
+       }
+ 
++      err = rocker_probe_ports(rocker);
++      if (err) {
++              dev_err(&pdev->dev, "failed to probe ports\n");
++              goto err_probe_ports;
++      }
++
+       /* Only FIBs pointing to our own netdevs are programmed into
+        * the device, so no need to pass a callback.
+        */
+@@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+       rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
+ 
+-      err = rocker_probe_ports(rocker);
+-      if (err) {
+-              dev_err(&pdev->dev, "failed to probe ports\n");
+-              goto err_probe_ports;
+-      }
+-
+       dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+                (int)sizeof(rocker->hw.id), &rocker->hw.id);
+ 
+       return 0;
+ 
+-err_probe_ports:
+-      unregister_switchdev_notifier(&rocker_switchdev_notifier);
+ err_register_switchdev_notifier:
+       unregister_fib_notifier(&rocker->fib_nb);
+ err_register_fib_notifier:
++      rocker_remove_ports(rocker);
++err_probe_ports:
+       destroy_workqueue(rocker->rocker_owq);
+ err_alloc_ordered_workqueue:
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+@@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev)
+ {
+       struct rocker *rocker = pci_get_drvdata(pdev);
+ 
+-      rocker_remove_ports(rocker);
+       unregister_switchdev_notifier(&rocker_switchdev_notifier);
+       unregister_fib_notifier(&rocker->fib_nb);
++      rocker_remove_ports(rocker);
+       rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+       destroy_workqueue(rocker->rocker_owq);
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 728819feab44..e7114c34fe4b 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = {
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
++      {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
+ 
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index c7bdeb655646..5636c7ca8eba 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -1208,6 +1208,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
+       }
+       vhost_net_stop(n, &tx_sock, &rx_sock);
+       vhost_net_flush(n);
++      vhost_dev_stop(&n->dev);
+       vhost_dev_reset_owner(&n->dev, umem);
+       vhost_net_vq_reset(n);
+ done:
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index becf86aa4ac6..d6ec5a5a6782 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -280,7 +280,6 @@ struct tcf_block {
+       struct net *net;
+       struct Qdisc *q;
+       struct list_head cb_list;
+-      struct work_struct work;
+ };
+ 
+ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ac2ffd5e02b9..0a78ce57872d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5828,6 +5828,20 @@ void mem_cgroup_sk_alloc(struct sock *sk)
+       if (!mem_cgroup_sockets_enabled)
+               return;
+ 
++      /*
++       * Socket cloning can throw us here with sk_memcg already
++       * filled. It won't however, necessarily happen from
++       * process context. So the test for root memcg given
++       * the current task's memcg won't help us in this case.
++       *
++       * Respecting the original socket's memcg is a better
++       * decision in this case.
++       */
++      if (sk->sk_memcg) {
++              css_get(&sk->sk_memcg->css);
++              return;
++      }
++
+       rcu_read_lock();
+       memcg = mem_cgroup_from_task(current);
+       if (memcg == root_mem_cgroup)
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c0b5b2f17412..7571dabfc4cf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1675,16 +1675,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+               newsk->sk_dst_pending_confirm = 0;
+               newsk->sk_wmem_queued   = 0;
+               newsk->sk_forward_alloc = 0;
+-
+-              /* sk->sk_memcg will be populated at accept() time */
+-              newsk->sk_memcg = NULL;
+-
+               atomic_set(&newsk->sk_drops, 0);
+               newsk->sk_send_head     = NULL;
+               newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+               atomic_set(&newsk->sk_zckey, 0);
+ 
+               sock_reset_flag(newsk, SOCK_DONE);
++              mem_cgroup_sk_alloc(newsk);
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+ 
+               rcu_read_lock();
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index 5eeb1d20cc38..676092d7bd81 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -94,6 +94,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+       return more_reuse;
+ }
+ 
++static void reuseport_free_rcu(struct rcu_head *head)
++{
++      struct sock_reuseport *reuse;
++
++      reuse = container_of(head, struct sock_reuseport, rcu);
++      if (reuse->prog)
++              bpf_prog_destroy(reuse->prog);
++      kfree(reuse);
++}
++
+ /**
+  *  reuseport_add_sock - Add a socket to the reuseport group of another.
+  *  @sk:  New socket to add to the group.
+@@ -102,7 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+  */
+ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
+ {
+-      struct sock_reuseport *reuse;
++      struct sock_reuseport *old_reuse, *reuse;
+ 
+       if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
+               int err = reuseport_alloc(sk2);
+@@ -113,10 +123,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
+ 
+       spin_lock_bh(&reuseport_lock);
+       reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
+-                                        lockdep_is_held(&reuseport_lock)),
+-      WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
+-                                          lockdep_is_held(&reuseport_lock)),
+-                "socket already in reuseport group");
++                                        lockdep_is_held(&reuseport_lock));
++      old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
++                                           lockdep_is_held(&reuseport_lock));
++      if (old_reuse && old_reuse->num_socks != 1) {
++              spin_unlock_bh(&reuseport_lock);
++              return -EBUSY;
++      }
+ 
+       if (reuse->num_socks == reuse->max_socks) {
+               reuse = reuseport_grow(reuse);
+@@ -134,19 +147,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
+ 
+       spin_unlock_bh(&reuseport_lock);
+ 
++      if (old_reuse)
++              call_rcu(&old_reuse->rcu, reuseport_free_rcu);
+       return 0;
+ }
+ 
+-static void reuseport_free_rcu(struct rcu_head *head)
+-{
+-      struct sock_reuseport *reuse;
+-
+-      reuse = container_of(head, struct sock_reuseport, rcu);
+-      if (reuse->prog)
+-              bpf_prog_destroy(reuse->prog);
+-      kfree(reuse);
+-}
+-
+ void reuseport_detach_sock(struct sock *sk)
+ {
+       struct sock_reuseport *reuse;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 2d49717a7421..f0b1fc35dde1 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -386,7 +386,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+       pip->frag_off = htons(IP_DF);
+       pip->ttl      = 1;
+       pip->daddr    = fl4.daddr;
++
++      rcu_read_lock();
+       pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
++      rcu_read_unlock();
++
+       pip->protocol = IPPROTO_IGMP;
+       pip->tot_len  = 0;      /* filled in later */
+       ip_select_ident(net, skb, NULL);
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 4ca46dc08e63..3668c4182655 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -475,7 +475,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
+               }
+               spin_unlock_bh(&queue->fastopenq.lock);
+       }
+-      mem_cgroup_sk_alloc(newsk);
+ out:
+       release_sock(sk);
+       if (req)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 8e053ad7cae2..c821f5d68720 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2434,6 +2434,12 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 
+       WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ 
++      if (sk->sk_frag.page) {
++              put_page(sk->sk_frag.page);
++              sk->sk_frag.page = NULL;
++              sk->sk_frag.offset = 0;
++      }
++
+       sk->sk_error_report(sk);
+       return err;
+ }
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 8322f26e770e..25c5a0b60cfc 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -481,7 +481,8 @@ static void bbr_advance_cycle_phase(struct sock *sk)
+ 
+       bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
+       bbr->cycle_mstamp = tp->delivered_mstamp;
+-      bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
++      bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
++                                          bbr_pacing_gain[bbr->cycle_idx];
+ }
+ 
+ /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
+@@ -490,8 +491,7 @@ static void bbr_update_cycle_phase(struct sock *sk,
+ {
+       struct bbr *bbr = inet_csk_ca(sk);
+ 
+-      if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
+-          bbr_is_next_cycle_phase(sk, rs))
++      if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
+               bbr_advance_cycle_phase(sk);
+ }
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f49bd7897e95..2547222589fe 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -186,7 +186,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ 
+ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
+ static void addrconf_dad_work(struct work_struct *w);
+-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
++static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
++                                 bool send_na);
+ static void addrconf_dad_run(struct inet6_dev *idev);
+ static void addrconf_rs_timer(struct timer_list *t);
+ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
+@@ -3833,12 +3834,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+            idev->cnf.accept_dad < 1) ||
+           !(ifp->flags&IFA_F_TENTATIVE) ||
+           ifp->flags & IFA_F_NODAD) {
++              bool send_na = false;
++
++              if (ifp->flags & IFA_F_TENTATIVE &&
++                  !(ifp->flags & IFA_F_OPTIMISTIC))
++                      send_na = true;
+               bump_id = ifp->flags & IFA_F_TENTATIVE;
+               ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
+               spin_unlock(&ifp->lock);
+               read_unlock_bh(&idev->lock);
+ 
+-              addrconf_dad_completed(ifp, bump_id);
++              addrconf_dad_completed(ifp, bump_id, send_na);
+               return;
+       }
+ 
+@@ -3967,16 +3973,21 @@ static void addrconf_dad_work(struct work_struct *w)
+       }
+ 
+       if (ifp->dad_probes == 0) {
++              bool send_na = false;
++
+               /*
+                * DAD was successful
+                */
+ 
++              if (ifp->flags & IFA_F_TENTATIVE &&
++                  !(ifp->flags & IFA_F_OPTIMISTIC))
++                      send_na = true;
+               bump_id = ifp->flags & IFA_F_TENTATIVE;
+               ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
+               spin_unlock(&ifp->lock);
+               write_unlock_bh(&idev->lock);
+ 
+-              addrconf_dad_completed(ifp, bump_id);
++              addrconf_dad_completed(ifp, bump_id, send_na);
+ 
+               goto out;
+       }
+@@ -4014,7 +4025,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+       return true;
+ }
+ 
+-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
++static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
++                                 bool send_na)
+ {
+       struct net_device *dev = ifp->idev->dev;
+       struct in6_addr lladdr;
+@@ -4046,6 +4058,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
+       if (send_mld)
+               ipv6_mc_dad_complete(ifp->idev);
+ 
++      /* send unsolicited NA if enabled */
++      if (send_na &&
++          (ifp->idev->cnf.ndisc_notify ||
++           dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
++              ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
++                            /*router=*/ !!ifp->idev->cnf.forwarding,
++                            /*solicited=*/ false, /*override=*/ true,
++                            /*inc_opt=*/ true);
++      }
++
+       if (send_rs) {
+               /*
+                *      If a host as already performed a random delay
+@@ -4352,9 +4374,11 @@ static void addrconf_verify_rtnl(void)
+                                               spin_lock(&ifpub->lock);
+                                               ifpub->regen_count = 0;
+                                               spin_unlock(&ifpub->lock);
++                                              rcu_read_unlock_bh();
+                                              ipv6_create_tempaddr(ifpub, ifp, true);
+                                               in6_ifa_put(ifpub);
+                                               in6_ifa_put(ifp);
++                                              rcu_read_lock_bh();
+                                               goto restart;
+                                       }
+                               } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index c9441ca45399..416917719a6f 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+       struct net *net = sock_net(sk);
+       __be32 v4addr = 0;
+       unsigned short snum;
++      bool saved_ipv6only;
+       int addr_type = 0;
+       int err = 0;
+ 
+@@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+       if (!(addr_type & IPV6_ADDR_MULTICAST))
+               np->saddr = addr->sin6_addr;
+ 
++      saved_ipv6only = sk->sk_ipv6only;
++      if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED)
++              sk->sk_ipv6only = 1;
++
+       /* Make sure we are allowed to bind here. */
+       if ((snum || !inet->bind_address_no_port) &&
+           sk->sk_prot->get_port(sk, snum)) {
++              sk->sk_ipv6only = saved_ipv6only;
+               inet_reset_saddr(sk);
+               err = -EADDRINUSE;
+               goto out;
+       }
+ 
+-      if (addr_type != IPV6_ADDR_ANY) {
++      if (addr_type != IPV6_ADDR_ANY)
+               sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+-              if (addr_type != IPV6_ADDR_MAPPED)
+-                      sk->sk_ipv6only = 1;
+-      }
+       if (snum)
+               sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
+       inet->inet_sport = htons(inet->inet_num);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index a2e1a864eb46..4fc566ec7e79 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
+               return ERR_PTR(-ENOENT);
+ 
+       it->mrt = mrt;
++      it->cache = NULL;
+       return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
+               : SEQ_START_TOKEN;
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index b3cea200c85e..f61a5b613b52 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -566,6 +566,11 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+ 
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
++              /* skip tentative addresses until dad completes */
++              if (ifa->flags & IFA_F_TENTATIVE &&
++                  !(ifa->flags & IFA_F_OPTIMISTIC))
++                      continue;
++
+               ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
+                             /*router=*/ !!idev->cnf.forwarding,
+                             /*solicited=*/ false, /*override=*/ true,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0458b761f3c5..a560fb1d0230 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1586,12 +1586,19 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
+        * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
+        * expired, independently from their aging, as per RFC 8201 section 4
+        */
+-      if (!(rt->rt6i_flags & RTF_EXPIRES) &&
+-          time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+-              RT6_TRACE("aging clone %p\n", rt);
++      if (!(rt->rt6i_flags & RTF_EXPIRES)) {
++              if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
++                      RT6_TRACE("aging clone %p\n", rt);
++                      rt6_remove_exception(bucket, rt6_ex);
++                      return;
++              }
++      } else if (time_after(jiffies, rt->dst.expires)) {
++              RT6_TRACE("purging expired route %p\n", rt);
+               rt6_remove_exception(bucket, rt6_ex);
+               return;
+-      } else if (rt->rt6i_flags & RTF_GATEWAY) {
++      }
++
++      if (rt->rt6i_flags & RTF_GATEWAY) {
+               struct neighbour *neigh;
+               __u8 neigh_flags = 0;
+ 
+@@ -1606,11 +1613,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
+                       rt6_remove_exception(bucket, rt6_ex);
+                       return;
+               }
+-      } else if (__rt6_check_expired(rt)) {
+-              RT6_TRACE("purging expired route %p\n", rt);
+-              rt6_remove_exception(bucket, rt6_ex);
+-              return;
+       }
++
+       gc_args->more++;
+ }
+ 
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index b9d63d2246e6..e6b853f0ee4f 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -217,8 +217,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
+ 
+ static void tcf_chain_destroy(struct tcf_chain *chain)
+ {
++      struct tcf_block *block = chain->block;
++
+       list_del(&chain->list);
+       kfree(chain);
++      if (list_empty(&block->chain_list))
++              kfree(block);
+ }
+ 
+ static void tcf_chain_hold(struct tcf_chain *chain)
+@@ -329,49 +333,34 @@ int tcf_block_get(struct tcf_block **p_block,
+ }
+ EXPORT_SYMBOL(tcf_block_get);
+ 
+-static void tcf_block_put_final(struct work_struct *work)
+-{
+-      struct tcf_block *block = container_of(work, struct tcf_block, work);
+-      struct tcf_chain *chain, *tmp;
+-
+-      rtnl_lock();
+-
+-      /* At this point, all the chains should have refcnt == 1. */
+-      list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+-              tcf_chain_put(chain);
+-      rtnl_unlock();
+-      kfree(block);
+-}
+-
+ /* XXX: Standalone actions are not allowed to jump to any chain, and bound
+  * actions should be all removed after flushing.
+  */
+ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
+                      struct tcf_block_ext_info *ei)
+ {
+-      struct tcf_chain *chain;
++      struct tcf_chain *chain, *tmp;
+ 
+       if (!block)
+               return;
+-      /* Hold a refcnt for all chains, except 0, so that they don't disappear
++      /* Hold a refcnt for all chains, so that they don't disappear
+        * while we are iterating.
+        */
+       list_for_each_entry(chain, &block->chain_list, list)
+-              if (chain->index)
+-                      tcf_chain_hold(chain);
++              tcf_chain_hold(chain);
+ 
+       list_for_each_entry(chain, &block->chain_list, list)
+               tcf_chain_flush(chain);
+ 
+       tcf_block_offload_unbind(block, q, ei);
+ 
+-      INIT_WORK(&block->work, tcf_block_put_final);
+-      /* Wait for existing RCU callbacks to cool down, make sure their works
+-       * have been queued before this. We can not flush pending works here
+-       * because we are holding the RTNL lock.
+-       */
+-      rcu_barrier();
+-      tcf_queue_work(&block->work);
++      /* At this point, all the chains should have refcnt >= 1. */
++      list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
++              tcf_chain_put(chain);
++
++      /* Finally, put chain 0 and allow block to be freed. */
++      chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
++      tcf_chain_put(chain);
+ }
+ EXPORT_SYMBOL(tcf_block_put_ext);
+ 
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 507859cdd1cb..33294b5b2c6a 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -544,6 +544,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+                               u32 flags)
+ {
++      struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_u32_offload cls_u32 = {};
+       bool skip_sw = tc_skip_sw(flags);
+@@ -563,7 +564,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+       cls_u32.knode.sel = &n->sel;
+       cls_u32.knode.exts = &n->exts;
+       if (n->ht_down)
+-              cls_u32.knode.link_handle = n->ht_down->handle;
++              cls_u32.knode.link_handle = ht->handle;
+ 
+       err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+       if (err < 0) {
+@@ -840,8 +841,9 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
+ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
+                                        struct tc_u_knode *n)
+ {
+-      struct tc_u_knode *new;
++      struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+       struct tc_u32_sel *s = &n->sel;
++      struct tc_u_knode *new;
+ 
+       new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
+                     GFP_KERNEL);
+@@ -859,11 +861,11 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
+       new->fshift = n->fshift;
+       new->res = n->res;
+       new->flags = n->flags;
+-      RCU_INIT_POINTER(new->ht_down, n->ht_down);
++      RCU_INIT_POINTER(new->ht_down, ht);
+ 
+       /* bump reference count as long as we hold pointer to structure */
+-      if (new->ht_down)
+-              new->ht_down->refcnt++;
++      if (ht)
++              ht->refcnt++;
+ 
+ #ifdef CONFIG_CLS_U32_PERF
+       /* Statistics may be incremented by readers during update
