commit:     167258b56efde60fa9762a0de8dc69a8d22fa322
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug  4 11:54:45 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug  4 11:54:45 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=167258b5

Linux patch 4.14.242

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1241_linux-4.14.242.patch | 1768 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1772 insertions(+)

diff --git a/0000_README b/0000_README
index 2161354..e9e9664 100644
--- a/0000_README
+++ b/0000_README
@@ -1007,6 +1007,10 @@ Patch:  1240_linux-4.14.241.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.241
 
+Patch:  1241_linux-4.14.242.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.242
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1241_linux-4.14.242.patch b/1241_linux-4.14.242.patch
new file mode 100644
index 0000000..0abd1f0
--- /dev/null
+++ b/1241_linux-4.14.242.patch
@@ -0,0 +1,1768 @@
+diff --git a/Makefile b/Makefile
+index 439f416c36ff8..0179c3c463b38 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 241
++SUBLEVEL = 242
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index a9000d22b2c00..873889ddecbed 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -155,16 +155,15 @@
+               #size-cells = <1>;
+               ranges;
+ 
+-              vic: intc@10140000 {
++              vic: interrupt-controller@10140000 {
+                       compatible = "arm,versatile-vic";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       reg = <0x10140000 0x1000>;
+-                      clear-mask = <0xffffffff>;
+                       valid-mask = <0xffffffff>;
+               };
+ 
+-              sic: intc@10003000 {
++              sic: interrupt-controller@10003000 {
+                       compatible = "arm,versatile-sic";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+ 
+       amba {
+               /* The Versatile PB is using more SIC IRQ lines than the AB */
+-              sic: intc@10003000 {
++              sic: interrupt-controller@10003000 {
+                       clear-mask = <0xffffffff>;
+                       /*
+                        * Valid interrupt lines mask according to
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 6e81788a30c12..0eaca7a130c9f 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -4,6 +4,8 @@
+ 
+ #include <asm/ldt.h>
+ 
++struct task_struct;
++
+ /* misc architecture specific prototypes */
+ 
+ void syscall_init(void);
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index dab6940ea99cb..b928e61fe3751 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
+ static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
+ {
+       ioapic->rtc_status.pending_eoi = 0;
+-      bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
++      bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
+ }
+ 
+ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
+diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
+index ea1a4e0297dae..283f1f489bcac 100644
+--- a/arch/x86/kvm/ioapic.h
++++ b/arch/x86/kvm/ioapic.h
+@@ -43,13 +43,13 @@ struct kvm_vcpu;
+ 
+ struct dest_map {
+       /* vcpu bitmap where IRQ has been sent */
+-      DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
++      DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
+ 
+       /*
+        * Vector sent to a given vcpu, only valid when
+        * the vcpu's bit in map is set
+        */
+-      u8 vectors[KVM_MAX_VCPU_ID];
++      u8 vectors[KVM_MAX_VCPU_ID + 1];
+ };
+ 
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 37d826acd0179..d77caab7ad5e4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -400,8 +400,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+ 
+       if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
+       queue:
+-              if (has_error && !is_protmode(vcpu))
+-                      has_error = false;
+               if (reinject) {
+                       /*
+                        * On vmentry, vcpu->arch.exception.pending is only
+@@ -6624,13 +6622,20 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+       kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+ }
+ 
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
++{
++       if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
++               vcpu->arch.exception.error_code = false;
++       kvm_x86_ops->queue_exception(vcpu);
++}
++
+ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ {
+       int r;
+ 
+       /* try to reinject previous events if any */
+       if (vcpu->arch.exception.injected) {
+-              kvm_x86_ops->queue_exception(vcpu);
++              kvm_inject_exception(vcpu);
+               return 0;
+       }
+ 
+@@ -6675,7 +6680,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
+                       kvm_update_dr7(vcpu);
+               }
+ 
+-              kvm_x86_ops->queue_exception(vcpu);
++              kvm_inject_exception(vcpu);
+       } else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+               vcpu->arch.smi_pending = false;
+               enter_smm(vcpu);
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index ddaf46239e39e..472175e37055e 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -236,7 +236,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
+       return ret;
+ }
+ 
+-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
++static int hi3110_cmd(struct spi_device *spi, u8 command)
+ {
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+ 
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 022a9b3c7d4e8..d62d61d734ea1 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -267,6 +267,8 @@ struct ems_usb {
+       unsigned int free_slots; /* remember number of available slots */
+ 
+       struct ems_cpc_msg active_params; /* active controller parameters */
++      void *rxbuf[MAX_RX_URBS];
++      dma_addr_t rxbuf_dma[MAX_RX_URBS];
+ };
+ 
+ static void ems_usb_read_interrupt_callback(struct urb *urb)
+@@ -598,6 +600,7 @@ static int ems_usb_start(struct ems_usb *dev)
+       for (i = 0; i < MAX_RX_URBS; i++) {
+               struct urb *urb = NULL;
+               u8 *buf = NULL;
++              dma_addr_t buf_dma;
+ 
+               /* create a URB, and a buffer for it */
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -607,7 +610,7 @@ static int ems_usb_start(struct ems_usb *dev)
+               }
+ 
+               buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+-                                       &urb->transfer_dma);
++                                       &buf_dma);
+               if (!buf) {
+                       netdev_err(netdev, "No memory left for USB buffer\n");
+                       usb_free_urb(urb);
+@@ -615,6 +618,8 @@ static int ems_usb_start(struct ems_usb *dev)
+                       break;
+               }
+ 
++              urb->transfer_dma = buf_dma;
++
+               usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
+                                 buf, RX_BUFFER_SIZE,
+                                 ems_usb_read_bulk_callback, dev);
+@@ -630,6 +635,9 @@ static int ems_usb_start(struct ems_usb *dev)
+                       break;
+               }
+ 
++              dev->rxbuf[i] = buf;
++              dev->rxbuf_dma[i] = buf_dma;
++
+               /* Drop reference, USB core will take care of freeing it */
+               usb_free_urb(urb);
+       }
+@@ -695,6 +703,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
+ 
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+ 
++      for (i = 0; i < MAX_RX_URBS; ++i)
++              usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
++                                dev->rxbuf[i], dev->rxbuf_dma[i]);
++
+       usb_kill_anchored_urbs(&dev->tx_submitted);
+       atomic_set(&dev->active_tx_urbs, 0);
+ 
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index c6dcf93675c00..592c6e7f3dca4 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -207,6 +207,8 @@ struct esd_usb2 {
+       int net_count;
+       u32 version;
+       int rxinitdone;
++      void *rxbuf[MAX_RX_URBS];
++      dma_addr_t rxbuf_dma[MAX_RX_URBS];
+ };
+ 
+ struct esd_usb2_net_priv {
+@@ -556,6 +558,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+       for (i = 0; i < MAX_RX_URBS; i++) {
+               struct urb *urb = NULL;
+               u8 *buf = NULL;
++              dma_addr_t buf_dma;
+ 
+               /* create a URB, and a buffer for it */
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -565,7 +568,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+               }
+ 
+               buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+-                                       &urb->transfer_dma);
++                                       &buf_dma);
+               if (!buf) {
+                       dev_warn(dev->udev->dev.parent,
+                                "No memory left for USB buffer\n");
+@@ -573,6 +576,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+                       goto freeurb;
+               }
+ 
++              urb->transfer_dma = buf_dma;
++
+               usb_fill_bulk_urb(urb, dev->udev,
+                                 usb_rcvbulkpipe(dev->udev, 1),
+                                 buf, RX_BUFFER_SIZE,
+@@ -585,8 +590,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+                       usb_unanchor_urb(urb);
+                       usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+                                         urb->transfer_dma);
++                      goto freeurb;
+               }
+ 
++              dev->rxbuf[i] = buf;
++              dev->rxbuf_dma[i] = buf_dma;
++
+ freeurb:
+               /* Drop reference, USB core will take care of freeing it */
+               usb_free_urb(urb);
+@@ -674,6 +683,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
+       int i, j;
+ 
+       usb_kill_anchored_urbs(&dev->rx_submitted);
++
++      for (i = 0; i < MAX_RX_URBS; ++i)
++              usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
++                                dev->rxbuf[i], dev->rxbuf_dma[i]);
++
+       for (i = 0; i < dev->net_count; i++) {
+               priv = dev->nets[i];
+               if (priv) {
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 6b0c6009dde0d..9e43fbb4cc9d7 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -664,6 +664,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
+                       break;
+               }
+ 
++              urb->transfer_dma = buf_dma;
++
+               usb_fill_bulk_urb(urb, priv->udev,
+                                 usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+                                 buf, MCBA_USB_RX_BUFF_SIZE,
+diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
+index 3e44164736079..df99354ec12aa 100644
+--- a/drivers/net/can/usb/usb_8dev.c
++++ b/drivers/net/can/usb/usb_8dev.c
+@@ -148,7 +148,8 @@ struct usb_8dev_priv {
+       u8 *cmd_msg_buffer;
+ 
+       struct mutex usb_8dev_cmd_lock;
+-
++      void *rxbuf[MAX_RX_URBS];
++      dma_addr_t rxbuf_dma[MAX_RX_URBS];
+ };
+ 
+ /* tx frame */
+@@ -744,6 +745,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
+       for (i = 0; i < MAX_RX_URBS; i++) {
+               struct urb *urb = NULL;
+               u8 *buf;
++              dma_addr_t buf_dma;
+ 
+               /* create a URB, and a buffer for it */
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -753,7 +755,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
+               }
+ 
+               buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+-                                       &urb->transfer_dma);
++                                       &buf_dma);
+               if (!buf) {
+                       netdev_err(netdev, "No memory left for USB buffer\n");
+                       usb_free_urb(urb);
+@@ -761,6 +763,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
+                       break;
+               }
+ 
++              urb->transfer_dma = buf_dma;
++
+               usb_fill_bulk_urb(urb, priv->udev,
+                                 usb_rcvbulkpipe(priv->udev,
+                                                 USB_8DEV_ENDP_DATA_RX),
+@@ -778,6 +782,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
+                       break;
+               }
+ 
++              priv->rxbuf[i] = buf;
++              priv->rxbuf_dma[i] = buf_dma;
++
+               /* Drop reference, USB core will take care of freeing it */
+               usb_free_urb(urb);
+       }
+@@ -847,6 +854,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
+ 
+       usb_kill_anchored_urbs(&priv->rx_submitted);
+ 
++      for (i = 0; i < MAX_RX_URBS; ++i)
++              usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
++                                priv->rxbuf[i], priv->rxbuf_dma[i]);
++
+       usb_kill_anchored_urbs(&priv->tx_submitted);
+       atomic_set(&priv->active_tx_urbs, 0);
+ 
+diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
+index 32d7229544fae..e3b4345b2cc83 100644
+--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
++++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
+@@ -367,7 +367,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
+       int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+       void __iomem *ioaddr;
+ 
+-      i = pci_enable_device(pdev);
++      i = pcim_enable_device(pdev);
+       if (i) return i;
+ 
+       pci_set_master(pdev);
+@@ -389,7 +389,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+       ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
+       if (!ioaddr)
+-              goto err_out_free_res;
++              goto err_out_netdev;
+ 
+       for (i = 0; i < 3; i++)
+              ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+@@ -468,8 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ err_out_cleardev:
+       pci_iounmap(pdev, ioaddr);
+-err_out_free_res:
+-      pci_release_regions(pdev);
+ err_out_netdev:
+       free_netdev (dev);
+       return -ENODEV;
+@@ -1537,7 +1535,6 @@ static void w840_remove1(struct pci_dev *pdev)
+       if (dev) {
+               struct netdev_private *np = netdev_priv(dev);
+               unregister_netdev(dev);
+-              pci_release_regions(pdev);
+               pci_iounmap(pdev, np->base_addr);
+               free_netdev(dev);
+       }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index c6660b61e8361..69692f7a523cf 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -3469,6 +3469,7 @@ slave_start:
+ 
+               if (!SRIOV_VALID_STATE(dev->flags)) {
+                       mlx4_err(dev, "Invalid SRIOV state\n");
++                      err = -EINVAL;
+                       goto err_close;
+               }
+       }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 24d1b0be5a68e..24f70c337d8fc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -795,17 +795,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
+ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
+                             struct fs_prio *prio)
+ {
+-      struct mlx5_flow_table *next_ft;
++      struct mlx5_flow_table *next_ft, *first_ft;
+       int err = 0;
+ 
+       /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
+ 
+-      if (list_empty(&prio->node.children)) {
++      first_ft = list_first_entry_or_null(&prio->node.children,
++                                          struct mlx5_flow_table, node.list);
++      if (!first_ft || first_ft->level > ft->level) {
+               err = connect_prev_fts(dev, ft, prio);
+               if (err)
+                       return err;
+ 
+-              next_ft = find_next_chained_ft(prio);
++              next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
+               err = connect_fwd_rules(dev, ft, next_ft);
+               if (err)
+                       return err;
+@@ -1703,7 +1705,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
+                               node.list) == ft))
+               return 0;
+ 
+-      next_ft = find_next_chained_ft(prio);
++      next_ft = find_next_ft(ft);
+       err = connect_fwd_rules(dev, next_ft, ft);
+       if (err)
+               return err;
+diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
+index 43b090f61cdcd..aebc85a5e08aa 100644
+--- a/drivers/net/ethernet/sis/sis900.c
++++ b/drivers/net/ethernet/sis/sis900.c
+@@ -441,7 +441,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
+ #endif
+ 
+       /* setup various bits in PCI command register */
+-      ret = pci_enable_device(pci_dev);
++      ret = pcim_enable_device(pci_dev);
+       if(ret) return ret;
+ 
+       i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+@@ -467,7 +467,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
+       ioaddr = pci_iomap(pci_dev, 0, 0);
+       if (!ioaddr) {
+               ret = -ENOMEM;
+-              goto err_out_cleardev;
++              goto err_out;
+       }
+ 
+       sis_priv = netdev_priv(net_dev);
+@@ -575,8 +575,6 @@ err_unmap_tx:
+               sis_priv->tx_ring_dma);
+ err_out_unmap:
+       pci_iounmap(pci_dev, ioaddr);
+-err_out_cleardev:
+-      pci_release_regions(pci_dev);
+  err_out:
+       free_netdev(net_dev);
+       return ret;
+@@ -2423,7 +2421,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
+               sis_priv->tx_ring_dma);
+       pci_iounmap(pci_dev, sis_priv->ioaddr);
+       free_netdev(net_dev);
+-      pci_release_regions(pci_dev);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index d39cde74826da..7ba9cad183414 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -8211,8 +8211,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
+               err = niu_pci_vpd_scan_props(np, here, end);
+               if (err < 0)
+                       return err;
++              /* ret == 1 is not an error */
+               if (err == 1)
+-                      return -EINVAL;
++                      return 0;
+       }
+       return 0;
+ }
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index c8abbf81ef524..9e18389309cf4 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -339,9 +339,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+       offset += hdr_padded_len;
+       p += hdr_padded_len;
+ 
+-      copy = len;
+-      if (copy > skb_tailroom(skb))
+-              copy = skb_tailroom(skb);
++      /* Copy all frame if it fits skb->head, otherwise
++       * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
++       */
++      if (len <= skb_tailroom(skb))
++              copy = len;
++      else
++              copy = ETH_HLEN;
+       skb_put_data(skb, p, copy);
+ 
+       len -= copy;
+diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
+index 33449820e7542..533e3aa6275cd 100644
+--- a/drivers/nfc/nfcsim.c
++++ b/drivers/nfc/nfcsim.c
+@@ -201,8 +201,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
+ 
+               if (!IS_ERR(skb))
+                       dev_kfree_skb(skb);
+-
+-              skb = ERR_PTR(-ENODEV);
++              return;
+       }
+ 
+       dev->cb(dev->nfc_digital_dev, dev->arg, skb);
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+       fd->key = ptr + tree->max_key_len + 2;
+       hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+               tree->cnid, __builtin_return_address(0));
+-      mutex_lock(&tree->tree_lock);
++      switch (tree->cnid) {
++      case HFS_CAT_CNID:
++              mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++              break;
++      case HFS_EXT_CNID:
++              mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++              break;
++      case HFS_ATTR_CNID:
++              mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++              break;
++      default:
++              return -EINVAL;
++      }
+       return 0;
+ }
+ 
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index 8aec5e732abf9..bca3ea4137eeb 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+ 
+ #include "btree.h"
+ 
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+-              int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+       struct page *page;
++      int pagenum;
++      int bytes_read;
++      int bytes_to_read;
++      void *vaddr;
+ 
+       off += node->page_offset;
+-      page = node->page[0];
++      pagenum = off >> PAGE_SHIFT;
++      off &= ~PAGE_MASK; /* compute page offset for the first page */
+ 
+-      memcpy(buf, kmap(page) + off, len);
+-      kunmap(page);
++      for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++              if (pagenum >= node->tree->pages_per_bnode)
++                      break;
++              page = node->page[pagenum];
++              bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++              vaddr = kmap_atomic(page);
++              memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++              kunmap_atomic(vaddr);
++
++              pagenum++;
++              off = 0; /* page offset only applies to the first page */
++      }
+ }
+ 
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index dcc2aab1b2c43..25ac9a8bb57a7 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+ 
+ #define NODE_HASH_SIZE  256
+ 
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++      CATALOG_BTREE_MUTEX,
++      EXTENTS_BTREE_MUTEX,
++      ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+       struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 7e0d65e9586c7..691810b0e6bc8 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -427,14 +427,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+       if (!res) {
+               if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+                       res =  -EIO;
+-                      goto bail;
++                      goto bail_hfs_find;
+               }
+               hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+       }
+-      if (res) {
+-              hfs_find_exit(&fd);
+-              goto bail_no_root;
+-      }
++      if (res)
++              goto bail_hfs_find;
+       res = -EINVAL;
+       root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+       hfs_find_exit(&fd);
+@@ -450,6 +448,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+       /* everything's okay */
+       return 0;
+ 
++bail_hfs_find:
++      hfs_find_exit(&fd);
+ bail_no_root:
+       pr_err("get root inode failed\n");
+ bail:
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 90e658caa8deb..6a809517ca500 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1535,6 +1535,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
+       }
+ }
+ 
++/*
++ * zero out partial blocks of one cluster.
++ *
++ * start: file offset where zero starts, will be made upper block aligned.
++ * len: it will be trimmed to the end of current cluster if "start + len"
++ *      is bigger than it.
++ */
++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
++                                      u64 start, u64 len)
++{
++      int ret;
++      u64 start_block, end_block, nr_blocks;
++      u64 p_block, offset;
++      u32 cluster, p_cluster, nr_clusters;
++      struct super_block *sb = inode->i_sb;
++      u64 end = ocfs2_align_bytes_to_clusters(sb, start);
++
++      if (start + len < end)
++              end = start + len;
++
++      start_block = ocfs2_blocks_for_bytes(sb, start);
++      end_block = ocfs2_blocks_for_bytes(sb, end);
++      nr_blocks = end_block - start_block;
++      if (!nr_blocks)
++              return 0;
++
++      cluster = ocfs2_bytes_to_clusters(sb, start);
++      ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
++                              &nr_clusters, NULL);
++      if (ret)
++              return ret;
++      if (!p_cluster)
++              return 0;
++
++      offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
++      p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
++      return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
++}
++
+ static int ocfs2_zero_partial_clusters(struct inode *inode,
+                                      u64 start, u64 len)
+ {
+@@ -1544,6 +1583,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       unsigned int csize = osb->s_clustersize;
+       handle_t *handle;
++      loff_t isize = i_size_read(inode);
+ 
+       /*
+        * The "start" and "end" values are NOT necessarily part of
+@@ -1564,6 +1604,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+       if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
+               goto out;
+ 
++      /* No page cache for EOF blocks, issue zero out to disk. */
++      if (end > isize) {
++              /*
++               * zeroout eof blocks in last cluster starting from
++               * "isize" even "start" > "isize" because it is
++               * complicated to zeroout just at "start" as "start"
++               * may be not aligned with block size, buffer write
++               * would be required to do that, but out of eof buffer
++               * write is not supported.
++               */
++              ret = ocfs2_zeroout_partial_cluster(inode, isize,
++                                      end - isize);
++              if (ret) {
++                      mlog_errno(ret);
++                      goto out;
++              }
++              if (start >= isize)
++                      goto out;
++              end = isize;
++      }
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+@@ -1861,45 +1921,6 @@ out:
+       return ret;
+ }
+ 
+-/*
+- * zero out partial blocks of one cluster.
+- *
+- * start: file offset where zero starts, will be made upper block aligned.
+- * len: it will be trimmed to the end of current cluster if "start + len"
+- *      is bigger than it.
+- */
+-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+-                                      u64 start, u64 len)
+-{
+-      int ret;
+-      u64 start_block, end_block, nr_blocks;
+-      u64 p_block, offset;
+-      u32 cluster, p_cluster, nr_clusters;
+-      struct super_block *sb = inode->i_sb;
+-      u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+-
+-      if (start + len < end)
+-              end = start + len;
+-
+-      start_block = ocfs2_blocks_for_bytes(sb, start);
+-      end_block = ocfs2_blocks_for_bytes(sb, end);
+-      nr_blocks = end_block - start_block;
+-      if (!nr_blocks)
+-              return 0;
+-
+-      cluster = ocfs2_bytes_to_clusters(sb, start);
+-      ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+-                              &nr_clusters, NULL);
+-      if (ret)
+-              return ret;
+-      if (!p_cluster)
+-              return 0;
+-
+-      offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+-      p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+-      return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+-}
+-
+ /*
+  * Parts of this function taken from xfs_change_file_space()
+  */
+@@ -1941,7 +1962,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               goto out_inode_unlock;
+       }
+ 
+-      orig_isize = i_size_read(inode);
+       switch (sr->l_whence) {
+       case 0: /*SEEK_SET*/
+               break;
+@@ -1949,7 +1969,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               sr->l_start += f_pos;
+               break;
+       case 2: /*SEEK_END*/
+-              sr->l_start += orig_isize;
++              sr->l_start += i_size_read(inode);
+               break;
+       default:
+               ret = -EINVAL;
+@@ -2004,6 +2024,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               ret = -EINVAL;
+       }
+ 
++      orig_isize = i_size_read(inode);
+       /* zeroout eof blocks in the cluster. */
+       if (!ret && change_size && orig_isize < size) {
+               ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index de3e59329b022..2f303454a3233 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2784,6 +2784,15 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
+               skb->pfmemalloc = true;
+ }
+ 
++/**
++ * skb_frag_off() - Returns the offset of a skb fragment
++ * @frag: the paged fragment
++ */
++static inline unsigned int skb_frag_off(const skb_frag_t *frag)
++{
++      return frag->page_offset;
++}
++
+ /**
+  * skb_frag_page - retrieve the page referred to by a paged fragment
+  * @frag: the paged fragment
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 48afea1b8b4e5..ca68826d84495 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -65,14 +65,18 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+       skb_reset_mac_header(skb);
+ 
+       if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+-              u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+-              u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
++              u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
++              u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
++              u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
++
++              if (!pskb_may_pull(skb, needed))
++                      return -EINVAL;
+ 
+               if (!skb_partial_csum_set(skb, start, off))
+                       return -EINVAL;
+ 
+               p_off = skb_transport_offset(skb) + thlen;
+-              if (p_off > skb_headlen(skb))
++              if (!pskb_may_pull(skb, p_off))
+                       return -EINVAL;
+       } else {
+               /* gso packets without NEEDS_CSUM do not set transport_offset.
+@@ -100,14 +104,14 @@ retry:
+                       }
+ 
+                       p_off = keys.control.thoff + thlen;
+-                      if (p_off > skb_headlen(skb) ||
++                      if (!pskb_may_pull(skb, p_off) ||
+                           keys.basic.ip_proto != ip_proto)
+                               return -EINVAL;
+ 
+                       skb_set_transport_header(skb, keys.control.thoff);
+               } else if (gso_type) {
+                       p_off = thlen;
+-                      if (p_off > skb_headlen(skb))
++                      if (!pskb_may_pull(skb, p_off))
+                               return -EINVAL;
+               }
+       }
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a5ba41b3b8673..7ec1cdb66be8d 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -10,6 +10,7 @@
+ 
+ void unix_inflight(struct user_struct *user, struct file *fp);
+ void unix_notinflight(struct user_struct *user, struct file *fp);
++void unix_destruct_scm(struct sk_buff *skb);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index c86fcadccbd79..5dd22b740f9ce 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -48,7 +48,7 @@ static inline bool net_busy_loop_on(void)
+ 
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+-      return sk->sk_ll_usec && !signal_pending(current);
++      return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+ 
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
+index c0f0a13ed8183..49aa79c7b278a 100644
+--- a/include/net/llc_pdu.h
++++ b/include/net/llc_pdu.h
+@@ -15,9 +15,11 @@
+ #include <linux/if_ether.h>
+ 
+ /* Lengths of frame formats */
+-#define LLC_PDU_LEN_I 4       /* header and 2 control bytes */
+-#define LLC_PDU_LEN_S 4
+-#define LLC_PDU_LEN_U 3       /* header and 1 control byte */
++#define LLC_PDU_LEN_I         4       /* header and 2 control bytes */
++#define LLC_PDU_LEN_S         4
++#define LLC_PDU_LEN_U         3       /* header and 1 control byte */
++/* header and 1 control byte and XID info */
++#define LLC_PDU_LEN_U_XID     (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
+ /* Known SAP addresses */
+ #define LLC_GLOBAL_SAP        0xFF
+ #define LLC_NULL_SAP  0x00    /* not network-layer visible */
+@@ -50,9 +52,10 @@
+ #define LLC_PDU_TYPE_U_MASK    0x03   /* 8-bit control field */
+ #define LLC_PDU_TYPE_MASK      0x03
+ 
+-#define LLC_PDU_TYPE_I        0       /* first bit */
+-#define LLC_PDU_TYPE_S        1       /* first two bits */
+-#define LLC_PDU_TYPE_U        3       /* first two bits */
++#define LLC_PDU_TYPE_I                0       /* first bit */
++#define LLC_PDU_TYPE_S                1       /* first two bits */
++#define LLC_PDU_TYPE_U                3       /* first two bits */
+#define LLC_PDU_TYPE_U_XID    4       /* private type for detecting XID commands */
+ 
+ #define LLC_PDU_TYPE_IS_I(pdu) \
+       ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
+@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
+ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+                                      u8 ssap, u8 dsap, u8 cr)
+ {
+-      const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
++      int hlen = 4; /* default value for I and S types */
+       struct llc_pdu_un *pdu;
+ 
++      switch (type) {
++      case LLC_PDU_TYPE_U:
++              hlen = 3;
++              break;
++      case LLC_PDU_TYPE_U_XID:
++              hlen = 6;
++              break;
++      }
++
+       skb_push(skb, hlen);
+       skb_reset_network_header(skb);
+       pdu = llc_pdu_un_hdr(skb);
+@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
+       xid_info->fmt_id = LLC_XID_FMT_ID;      /* 0x81 */
+       xid_info->type   = svcs_supported;
+       xid_info->rw     = rx_window << 1;      /* size of receive window */
+-      skb_put(skb, sizeof(struct llc_xid_info));
++
++      /* no need to push/put since llc_pdu_header_init() has already
++       * pushed 3 + 3 bytes
++       */
+ }
+ 
+ /**
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index d4da07048aa3e..cbf96458ce22d 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -348,8 +348,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+ 
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+  * addresses.
+  */
+@@ -357,7 +356,6 @@ enum {
+       ((htonl(INADDR_BROADCAST) == a) ||  \
+        ipv4_is_multicast(a) ||            \
+        ipv4_is_zeronet(a) ||              \
+-       ipv4_is_test_198(a) ||             \
+        ipv4_is_anycast_6to4(a))
+ 
+ /* Flags used for the bind address copy functions.  */
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 58e7eefe4dbff..cf5d8fd7472e6 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3441,15 +3441,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+                                                 unbound_release_work);
+       struct workqueue_struct *wq = pwq->wq;
+       struct worker_pool *pool = pwq->pool;
+-      bool is_last;
++      bool is_last = false;
+ 
+-      if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+-              return;
++      /*
++       * when @pwq is not linked, it doesn't hold any reference to the
++       * @wq, and @wq is invalid to access.
++       */
++      if (!list_empty(&pwq->pwqs_node)) {
++              if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++                      return;
+ 
+-      mutex_lock(&wq->mutex);
+-      list_del_rcu(&pwq->pwqs_node);
+-      is_last = list_empty(&wq->pwqs);
+-      mutex_unlock(&wq->mutex);
++              mutex_lock(&wq->mutex);
++              list_del_rcu(&pwq->pwqs_node);
++              is_last = list_empty(&wq->pwqs);
++              mutex_unlock(&wq->mutex);
++      }
+ 
+       mutex_lock(&wq_pool_mutex);
+       put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 2dac647ff4201..237f6f0763556 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -206,6 +206,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+       kfree(attr);
+ }
+ 
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++      struct rb_node *node, *next;
++      struct garp_attr *attr;
++
++      for (node = rb_first(&app->gid);
++           next = node ? rb_next(node) : NULL, node != NULL;
++           node = next) {
++              attr = rb_entry(node, struct garp_attr, node);
++              garp_attr_destroy(app, attr);
++      }
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+       struct sk_buff *skb;
+@@ -612,6 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+ 
+       spin_lock_bh(&app->lock);
+       garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++      garp_attr_destroy_all(app);
+       garp_pdu_queue(app);
+       spin_unlock_bh(&app->lock);
+ 
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index be4dd31653474..7a893a03e7957 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -295,6 +295,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+       kfree(attr);
+ }
+ 
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++      struct rb_node *node, *next;
++      struct mrp_attr *attr;
++
++      for (node = rb_first(&app->mad);
++           next = node ? rb_next(node) : NULL, node != NULL;
++           node = next) {
++              attr = rb_entry(node, struct mrp_attr, node);
++              mrp_attr_destroy(app, attr);
++      }
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+       struct sk_buff *skb;
+@@ -899,6 +912,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+ 
+       spin_lock_bh(&app->lock);
+       mrp_mad_event(app, MRP_EVENT_TX);
++      mrp_attr_destroy_all(app);
+       mrp_pdu_queue(app);
+       spin_unlock_bh(&app->lock);
+ 
+diff --git a/net/Makefile b/net/Makefile
+index 14fede520840e..d05dc71996631 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -18,7 +18,7 @@ obj-$(CONFIG_NETFILTER)              += netfilter/
+ obj-$(CONFIG_INET)            += ipv4/
+ obj-$(CONFIG_TLS)             += tls/
+ obj-$(CONFIG_XFRM)            += xfrm/
+-obj-$(CONFIG_UNIX)            += unix/
++obj-$(CONFIG_UNIX_SCM)                += unix/
+ obj-$(CONFIG_NET)             += ipv6/
+ obj-$(CONFIG_PACKET)          += packet/
+ obj-$(CONFIG_NET_KEY)         += key/
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 24af08164b614..2a987a6ea6d73 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -549,10 +549,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+                               return -EFAULT;
+               }
+ 
++              rtnl_lock();
+               lock_sock(sk);
+ 
+-              if (ro->bound && ro->ifindex)
++              if (ro->bound && ro->ifindex) {
+                       dev = dev_get_by_index(sock_net(sk), ro->ifindex);
++                      if (!dev) {
++                              if (count > 1)
++                                      kfree(filter);
++                              err = -ENODEV;
++                              goto out_fil;
++                      }
++              }
+ 
+               if (ro->bound) {
+                       /* (try to) register the new filters */
+@@ -591,6 +599,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+                       dev_put(dev);
+ 
+               release_sock(sk);
++              rtnl_unlock();
+ 
+               break;
+ 
+@@ -603,10 +612,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 
+               err_mask &= CAN_ERR_MASK;
+ 
++              rtnl_lock();
+               lock_sock(sk);
+ 
+-              if (ro->bound && ro->ifindex)
++              if (ro->bound && ro->ifindex) {
+                       dev = dev_get_by_index(sock_net(sk), ro->ifindex);
++                      if (!dev) {
++                              err = -ENODEV;
++                              goto out_err;
++                      }
++              }
+ 
+               /* remove current error mask */
+               if (ro->bound) {
+@@ -630,6 +645,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+                       dev_put(dev);
+ 
+               release_sock(sk);
++              rtnl_unlock();
+ 
+               break;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index aa419f3162b8c..ea09e0809c122 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4763,7 +4763,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+ 
+       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+           pinfo->nr_frags &&
+-          !PageHighMem(skb_frag_page(frag0))) {
++          !PageHighMem(skb_frag_page(frag0)) &&
++          (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
+               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+                                                   skb_frag_size(frag0),
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 3b65fedf77ca7..699bd3052c611 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1023,7 +1023,7 @@ set_rcvbuf:
+                       if (val < 0)
+                               ret = -EINVAL;
+                       else
+-                              sk->sk_ll_usec = val;
++                              WRITE_ONCE(sk->sk_ll_usec, val);
+               }
+               break;
+ #endif
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index d301ac51bbe1d..ec48fb3fd30eb 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
+ {
+       u8 rc = LLC_PDU_LEN_U;
+ 
+-      if (addr->sllc_test || addr->sllc_xid)
++      if (addr->sllc_test)
+               rc = LLC_PDU_LEN_U;
++      else if (addr->sllc_xid)
++              /* We need to expand header to sizeof(struct llc_xid_info)
++               * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
++               * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
++               * filled all other space with user data. If we won't reserve this
++               * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
++               */
++              rc = LLC_PDU_LEN_U_XID;
+       else if (sk->sk_type == SOCK_STREAM)
+               rc = LLC_PDU_LEN_I;
+       return rc;
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 7ae4cc684d3ab..9fa3342c7a829 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
+       struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+       int rc;
+ 
+-      llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
++      llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
+                           ev->daddr.lsap, LLC_PDU_CMD);
+       llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
+       rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index ede0ab5dc400a..f13b476378aa1 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -506,8 +506,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
+               return false;
+ 
+       tstamp = nf_conn_tstamp_find(ct);
+-      if (tstamp && tstamp->stop == 0)
++      if (tstamp) {
++              s32 timeout = ct->timeout - nfct_time_stamp;
++
+               tstamp->stop = ktime_get_real_ns();
++              if (timeout < 0)
++                      tstamp->stop -= jiffies_to_nsecs(-timeout);
++      }
+ 
+       if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+                                   portid, report) < 0) {
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index a18cceecef88e..04dd813ed7755 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -153,7 +153,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
+               break;
+       default:
+-              return -EAFNOSUPPORT;
++              if (tb[NFTA_NAT_REG_ADDR_MIN])
++                      return -EAFNOSUPPORT;
++              break;
+       }
+       priv->family = family;
+ 
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 1af35b69e99e9..90428c59cfaf8 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -1125,7 +1125,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
+       if (unlikely(!af))
+               return NULL;
+ 
+-      if (af->from_addr_param(&paddr, param, peer_port, 0))
++      if (!af->from_addr_param(&paddr, param, peer_port, 0))
+               return NULL;
+ 
+       return __sctp_lookup_association(net, laddr, &paddr, transportp);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index d5cf05efddfd8..868b97607601a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -423,7 +423,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+               retval = SCTP_SCOPE_LINK;
+       } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+                  ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+-                 ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++                 ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++                 ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+               retval = SCTP_SCOPE_PRIVATE;
+       } else {
+               retval = SCTP_SCOPE_GLOBAL;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 14e6cb814e4ce..2e4d892768f9d 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2001,7 +2001,7 @@ static int tipc_listen(struct socket *sock, int len)
+ static int tipc_wait_for_accept(struct socket *sock, long timeo)
+ {
+       struct sock *sk = sock->sk;
+-      DEFINE_WAIT(wait);
++      DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       int err;
+ 
+       /* True wake-one mechanism for incoming connections: only
+@@ -2010,12 +2010,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
+        * anymore, the common case will execute the loop only once.
+       */
+       for (;;) {
+-              prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+-                                        TASK_INTERRUPTIBLE);
+               if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
++                      add_wait_queue(sk_sleep(sk), &wait);
+                       release_sock(sk);
+-                      timeo = schedule_timeout(timeo);
++                      timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+                       lock_sock(sk);
++                      remove_wait_queue(sk_sleep(sk), &wait);
+               }
+               err = 0;
+               if (!skb_queue_empty(&sk->sk_receive_queue))
+@@ -2027,7 +2027,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
+               if (signal_pending(current))
+                       break;
+       }
+-      finish_wait(sk_sleep(sk), &wait);
+       return err;
+ }
+ 
+diff --git a/net/unix/Kconfig b/net/unix/Kconfig
+index 8b31ab85d050f..3b9e450656a4d 100644
+--- a/net/unix/Kconfig
++++ b/net/unix/Kconfig
+@@ -19,6 +19,11 @@ config UNIX
+ 
+         Say Y unless you know what you are doing.
+ 
++config UNIX_SCM
++      bool
++      depends on UNIX
++      default y
++
+ config UNIX_DIAG
+       tristate "UNIX: socket monitoring interface"
+       depends on UNIX
+diff --git a/net/unix/Makefile b/net/unix/Makefile
+index ffd0a275c3a79..54e58cc4f9450 100644
+--- a/net/unix/Makefile
++++ b/net/unix/Makefile
+@@ -10,3 +10,5 @@ unix-$(CONFIG_SYSCTL)        += sysctl_net_unix.o
+ 
+ obj-$(CONFIG_UNIX_DIAG)       += unix_diag.o
+ unix_diag-y           := diag.o
++
++obj-$(CONFIG_UNIX_SCM)        += scm.o
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 8e7054fc27f8b..59009739d324b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -119,6 +119,8 @@
+ #include <linux/freezer.h>
+ #include <linux/file.h>
+ 
++#include "scm.h"
++
+ struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
+ EXPORT_SYMBOL_GPL(unix_socket_table);
+ DEFINE_SPINLOCK(unix_table_lock);
+@@ -1519,65 +1521,51 @@ out:
+       return err;
+ }
+ 
+-static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+-{
+-      int i;
+-
+-      scm->fp = UNIXCB(skb).fp;
+-      UNIXCB(skb).fp = NULL;
+-
+-      for (i = scm->fp->count-1; i >= 0; i--)
+-              unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+-}
+-
+-static void unix_destruct_scm(struct sk_buff *skb)
+-{
+-      struct scm_cookie scm;
+-      memset(&scm, 0, sizeof(scm));
+-      scm.pid  = UNIXCB(skb).pid;
+-      if (UNIXCB(skb).fp)
+-              unix_detach_fds(&scm, skb);
+-
+-      /* Alas, it calls VFS */
+-      /* So fscking what? fput() had been SMP-safe since the last Summer */
+-      scm_destroy(&scm);
+-      sock_wfree(skb);
+-}
+-
+-/*
+- * The "user->unix_inflight" variable is protected by the garbage
+- * collection lock, and we just read it locklessly here. If you go
+- * over the limit, there might be a tiny race in actually noticing
+- * it across threads. Tough.
+- */
+-static inline bool too_many_unix_fds(struct task_struct *p)
+-{
+-      struct user_struct *user = current_user();
+-
+-      if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+-              return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+-      return false;
+-}
+-
+-static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ {
+-      int i;
+-
+-      if (too_many_unix_fds(current))
+-              return -ETOOMANYREFS;
++      scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+ 
+       /*
+-       * Need to duplicate file references for the sake of garbage
+-       * collection.  Otherwise a socket in the fps might become a
+-       * candidate for GC while the skb is not yet queued.
++       * Garbage collection of unix sockets starts by selecting a set of
++       * candidate sockets which have reference only from being in flight
++       * (total_refs == inflight_refs).  This condition is checked once during
++       * the candidate collection phase, and candidates are marked as such, so
++       * that non-candidates can later be ignored.  While inflight_refs is
++       * protected by unix_gc_lock, total_refs (file count) is not, hence this
++       * is an instantaneous decision.
++       *
++       * Once a candidate, however, the socket must not be reinstalled into a
++       * file descriptor while the garbage collection is in progress.
++       *
++       * If the above conditions are met, then the directed graph of
++       * candidates (*) does not change while unix_gc_lock is held.
++       *
++       * Any operations that changes the file count through file descriptors
++       * (dup, close, sendmsg) does not change the graph since candidates are
++       * not installed in fds.
++       *
++       * Dequeing a candidate via recvmsg would install it into an fd, but
++       * that takes unix_gc_lock to decrement the inflight count, so it's
++       * serialized with garbage collection.
++       *
++       * MSG_PEEK is special in that it does not change the inflight count,
++       * yet does install the socket into an fd.  The following lock/unlock
++       * pair is to ensure serialization with garbage collection.  It must be
++       * done between incrementing the file count and installing the file into
++       * an fd.
++       *
++       * If garbage collection starts after the barrier provided by the
++       * lock/unlock, then it will see the elevated refcount and not mark this
++       * as a candidate.  If a garbage collection is already in progress
++       * before the file count was incremented, then the lock/unlock pair will
++       * ensure that garbage collection is finished before progressing to
++       * installing the fd.
++       *
++       * (*) A -> B where B is on the queue of A or B is on the queue of C
++       * which is on the queue of listening socket A.
+        */
+-      UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+-      if (!UNIXCB(skb).fp)
+-              return -ENOMEM;
+-
+-      for (i = scm->fp->count - 1; i >= 0; i--)
+-              unix_inflight(scm->fp->user, scm->fp->fp[i]);
+-      return 0;
++      spin_lock(&unix_gc_lock);
++      spin_unlock(&unix_gc_lock);
+ }
+ 
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+@@ -2205,7 +2193,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+               sk_peek_offset_fwd(sk, size);
+ 
+               if (UNIXCB(skb).fp)
+-                      scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++                      unix_peek_fds(&scm, skb);
+       }
+       err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+ 
+@@ -2446,7 +2434,7 @@ unlock:
+                       /* It is questionable, see note in unix_dgram_recvmsg.
+                        */
+                       if (UNIXCB(skb).fp)
+-                              scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++                              unix_peek_fds(&scm, skb);
+ 
+                       sk_peek_offset_fwd(sk, chunk);
+ 
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index c36757e728442..8bbe1b8e4ff7f 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -86,77 +86,13 @@
+ #include <net/scm.h>
+ #include <net/tcp_states.h>
+ 
++#include "scm.h"
++
+ /* Internal data structures and random procedures: */
+ 
+-static LIST_HEAD(gc_inflight_list);
+ static LIST_HEAD(gc_candidates);
+-static DEFINE_SPINLOCK(unix_gc_lock);
+ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+ 
+-unsigned int unix_tot_inflight;
+-
+-struct sock *unix_get_socket(struct file *filp)
+-{
+-      struct sock *u_sock = NULL;
+-      struct inode *inode = file_inode(filp);
+-
+-      /* Socket ? */
+-      if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+-              struct socket *sock = SOCKET_I(inode);
+-              struct sock *s = sock->sk;
+-
+-              /* PF_UNIX ? */
+-              if (s && sock->ops && sock->ops->family == PF_UNIX)
+-                      u_sock = s;
+-      }
+-      return u_sock;
+-}
+-
+-/* Keep the number of times in flight count for the file
+- * descriptor if it is for an AF_UNIX socket.
+- */
+-
+-void unix_inflight(struct user_struct *user, struct file *fp)
+-{
+-      struct sock *s = unix_get_socket(fp);
+-
+-      spin_lock(&unix_gc_lock);
+-
+-      if (s) {
+-              struct unix_sock *u = unix_sk(s);
+-
+-              if (atomic_long_inc_return(&u->inflight) == 1) {
+-                      BUG_ON(!list_empty(&u->link));
+-                      list_add_tail(&u->link, &gc_inflight_list);
+-              } else {
+-                      BUG_ON(list_empty(&u->link));
+-              }
+-              unix_tot_inflight++;
+-      }
+-      user->unix_inflight++;
+-      spin_unlock(&unix_gc_lock);
+-}
+-
+-void unix_notinflight(struct user_struct *user, struct file *fp)
+-{
+-      struct sock *s = unix_get_socket(fp);
+-
+-      spin_lock(&unix_gc_lock);
+-
+-      if (s) {
+-              struct unix_sock *u = unix_sk(s);
+-
+-              BUG_ON(!atomic_long_read(&u->inflight));
+-              BUG_ON(list_empty(&u->link));
+-
+-              if (atomic_long_dec_and_test(&u->inflight))
+-                      list_del_init(&u->link);
+-              unix_tot_inflight--;
+-      }
+-      user->unix_inflight--;
+-      spin_unlock(&unix_gc_lock);
+-}
+-
+ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+                         struct sk_buff_head *hitlist)
+ {
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+new file mode 100644
+index 0000000000000..e13d320c41c70
+--- /dev/null
++++ b/net/unix/scm.c
+@@ -0,0 +1,149 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/socket.h>
++#include <linux/net.h>
++#include <linux/fs.h>
++#include <net/af_unix.h>
++#include <net/scm.h>
++#include <linux/init.h>
++#include <linux/sched/signal.h>
++
++#include "scm.h"
++
++unsigned int unix_tot_inflight;
++EXPORT_SYMBOL(unix_tot_inflight);
++
++LIST_HEAD(gc_inflight_list);
++EXPORT_SYMBOL(gc_inflight_list);
++
++DEFINE_SPINLOCK(unix_gc_lock);
++EXPORT_SYMBOL(unix_gc_lock);
++
++struct sock *unix_get_socket(struct file *filp)
++{
++      struct sock *u_sock = NULL;
++      struct inode *inode = file_inode(filp);
++
++      /* Socket ? */
++      if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
++              struct socket *sock = SOCKET_I(inode);
++              struct sock *s = sock->sk;
++
++              /* PF_UNIX ? */
++              if (s && sock->ops && sock->ops->family == PF_UNIX)
++                      u_sock = s;
++      }
++      return u_sock;
++}
++EXPORT_SYMBOL(unix_get_socket);
++
++/* Keep the in-flight count up to date for the file descriptor,
++ * if it refers to an AF_UNIX socket.
++ */
++void unix_inflight(struct user_struct *user, struct file *fp)
++{
++      struct sock *s = unix_get_socket(fp);
++
++      spin_lock(&unix_gc_lock);
++
++      if (s) {
++              struct unix_sock *u = unix_sk(s);
++
++              if (atomic_long_inc_return(&u->inflight) == 1) {
++                      BUG_ON(!list_empty(&u->link));
++                      list_add_tail(&u->link, &gc_inflight_list);
++              } else {
++                      BUG_ON(list_empty(&u->link));
++              }
++              unix_tot_inflight++;
++      }
++      user->unix_inflight++;
++      spin_unlock(&unix_gc_lock);
++}
++
++void unix_notinflight(struct user_struct *user, struct file *fp)
++{
++      struct sock *s = unix_get_socket(fp);
++
++      spin_lock(&unix_gc_lock);
++
++      if (s) {
++              struct unix_sock *u = unix_sk(s);
++
++              BUG_ON(!atomic_long_read(&u->inflight));
++              BUG_ON(list_empty(&u->link));
++
++              if (atomic_long_dec_and_test(&u->inflight))
++                      list_del_init(&u->link);
++              unix_tot_inflight--;
++      }
++      user->unix_inflight--;
++      spin_unlock(&unix_gc_lock);
++}
++
++/*
++ * The "user->unix_inflight" variable is protected by the garbage
++ * collection lock, and we just read it locklessly here. If you go
++ * over the limit, there might be a tiny race in actually noticing
++ * it across threads. Tough.
++ */
++static inline bool too_many_unix_fds(struct task_struct *p)
++{
++      struct user_struct *user = current_user();
++
++      if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++              return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
++      return false;
++}
++
++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++      int i;
++
++      if (too_many_unix_fds(current))
++              return -ETOOMANYREFS;
++
++      /*
++       * Need to duplicate file references for the sake of garbage
++       * collection.  Otherwise a socket in the fps might become a
++       * candidate for GC while the skb is not yet queued.
++       */
++      UNIXCB(skb).fp = scm_fp_dup(scm->fp);
++      if (!UNIXCB(skb).fp)
++              return -ENOMEM;
++
++      for (i = scm->fp->count - 1; i >= 0; i--)
++              unix_inflight(scm->fp->user, scm->fp->fp[i]);
++      return 0;
++}
++EXPORT_SYMBOL(unix_attach_fds);
++
++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++      int i;
++
++      scm->fp = UNIXCB(skb).fp;
++      UNIXCB(skb).fp = NULL;
++
++      for (i = scm->fp->count-1; i >= 0; i--)
++              unix_notinflight(scm->fp->user, scm->fp->fp[i]);
++}
++EXPORT_SYMBOL(unix_detach_fds);
++
++void unix_destruct_scm(struct sk_buff *skb)
++{
++      struct scm_cookie scm;
++
++      memset(&scm, 0, sizeof(scm));
++      scm.pid  = UNIXCB(skb).pid;
++      if (UNIXCB(skb).fp)
++              unix_detach_fds(&scm, skb);
++
++      /* Alas, it calls VFS */
++      /* So fscking what? fput() had been SMP-safe since the last Summer */
++      scm_destroy(&scm);
++      sock_wfree(skb);
++}
++EXPORT_SYMBOL(unix_destruct_scm);
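
[Editor's note: the consolidated helpers in net/unix/scm.c above are what the af_unix sendmsg() path reaches when a process passes descriptors: unix_attach_fds() duplicates the file references and marks each one in flight before the skb is queued. A send-side userspace sketch for context, with assumed names and minimal error handling:

    #include <string.h>
    #include <sys/socket.h>

    /* Send one fd over a connected AF_UNIX socket via SCM_RIGHTS. */
    static int send_passed_fd(int sock, int fd)
    {
            char data[1] = { 0 };
            union {
                    char buf[CMSG_SPACE(sizeof(int))];
                    struct cmsghdr align;  /* keep the buffer aligned for CMSG_* */
            } u;
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            cmsg->cmsg_level = SOL_SOCKET;
            cmsg->cmsg_type = SCM_RIGHTS;
            cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
            memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));

            /* Kernel-side this lands in unix_attach_fds(). */
            return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
    }
]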
+diff --git a/net/unix/scm.h b/net/unix/scm.h
+new file mode 100644
+index 0000000000000..5a255a477f160
+--- /dev/null
++++ b/net/unix/scm.h
+@@ -0,0 +1,10 @@
++#ifndef NET_UNIX_SCM_H
++#define NET_UNIX_SCM_H
++
++extern struct list_head gc_inflight_list;
++extern spinlock_t unix_gc_lock;
++
++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
++
++#endif
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 2a67267ab6b4f..8bbcdcf5c829d 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1026,16 +1026,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+                        * be grouped with this beacon for updates ...
+                        */
+                       if (!cfg80211_combine_bsses(rdev, new)) {
+-                              kfree(new);
++                              bss_ref_put(rdev, new);
+                               goto drop;
+                       }
+               }
+ 
+               if (rdev->bss_entries >= bss_entries_limit &&
+                   !cfg80211_bss_expire_oldest(rdev)) {
+-                      if (!list_empty(&new->hidden_list))
+-                              list_del(&new->hidden_list);
+-                      kfree(new);
++                      bss_ref_put(rdev, new);
+                       goto drop;
+               }
+ 
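
[Editor's note: the cfg80211 hunks above replace bare kfree() calls with bss_ref_put(): once an entry has been linked into reference-counted structures (here via hidden_list), freeing it directly leaves the references it holds unreleased. A generic sketch of the put-versus-free pattern, with invented names:

    #include <stdlib.h>

    struct node {
            int refs;               /* holders of this node */
            struct node *held;      /* a reference this node owns */
    };

    /* Drop one reference; only the last put may free, and it must
     * first release anything the node itself still holds.
     */
    static void node_put(struct node *n)
    {
            if (n && --n->refs == 0) {
                    if (n->held)
                            node_put(n->held);
                    free(n);
            }
    }
    /* Calling free(n) directly would leak n->held's reference. */
]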
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index d7c34feef58ea..9d42a2821ecb9 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -216,8 +216,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
+                       if (type != MAP__FUNCTION)
+                               dso__set_loaded(dso, map->type);
+               }
+-
+-              nsinfo__put(dso->nsinfo);
+               dso->nsinfo = nsi;
+               dso__put(dso);
+       }
+diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
+index 16d42b2de424e..1963440f67251 100644
+--- a/tools/testing/selftests/vm/userfaultfd.c
++++ b/tools/testing/selftests/vm/userfaultfd.c
+@@ -131,7 +131,7 @@ static void anon_allocate_area(void **alloc_area)
+ {
+       *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+-      if (*alloc_area == MAP_FAILED)
++      if (*alloc_area == MAP_FAILED) {
+               fprintf(stderr, "mmap of anonymous memory failed");
+               *alloc_area = NULL;
+       }
