The following commit has been merged in the master branch:
commit f4915933947c71f08ed1c5a6c9b4fdbe735e18cf
Merge: 6855b9be9cf70d3fd4b4b9a00696eae65335320c 
5fc31936081919a8572a3d644f3fbb258038f337
Author: Paolo Abeni <[email protected]>
Date:   Thu Mar 20 21:32:20 2025 +0100

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
    
    Cross-merge networking fixes after downstream PR (net-6.14-rc8).
    
    Conflicts:
    
    tools/testing/selftests/net/Makefile
      03544faad761 ("selftest: net: add proc_net_pktgen")
      3ed61b8938c6 ("selftests: net: test for lwtunnel dst ref loops")
    
    tools/testing/selftests/net/config:
      85cb3711acb8 ("selftests: net: Add test cases for link and peer netns")
      3ed61b8938c6 ("selftests: net: test for lwtunnel dst ref loops")
    
    Adjacent commits:
    
    tools/testing/selftests/net/Makefile
      c935af429ec2 ("selftests: net: add support for testing SO_RCVMARK and 
SO_RCVPRIORITY")
      355d940f4d5a ("Revert "selftests: Add IPv6 link-local address generation 
tests for GRE devices."")
    
    Signed-off-by: Paolo Abeni <[email protected]>

diff --combined MAINTAINERS
index 241ca9e260a29,00e94bec401e1..5959513a7359f
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -727,7 -727,7 +727,7 @@@ L: [email protected] (
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
 -F:    drivers/net/ethernet/mediatek/airoha_eth.c
 +F:    drivers/net/ethernet/airoha/
  
  AIROHA PCIE PHY DRIVER
  M:    Lorenzo Bianconi <[email protected]>
@@@ -2213,6 -2213,7 +2213,7 @@@ ARM/APPLE MACHINE SUPPOR
  M:    Sven Peter <[email protected]>
  M:    Janne Grunau <[email protected]>
  R:    Alyssa Rosenzweig <[email protected]>
+ R:    Neal Gompa <[email protected]>
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
@@@ -2237,6 -2238,7 +2238,7 @@@ F:      Documentation/devicetree/bindings/pc
  F:    Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
  F:    Documentation/devicetree/bindings/power/apple*
  F:    Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml
+ F:    Documentation/devicetree/bindings/spi/apple,spi.yaml
  F:    Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
  F:    arch/arm64/boot/dts/apple/
  F:    drivers/bluetooth/hci_bcm4377.c
@@@ -2254,6 -2256,7 +2256,7 @@@ F:      drivers/nvmem/apple-efuses.
  F:    drivers/pinctrl/pinctrl-apple-gpio.c
  F:    drivers/pwm/pwm-apple.c
  F:    drivers/soc/apple/*
+ F:    drivers/spi/spi-apple.c
  F:    drivers/watchdog/apple_wdt.c
  F:    include/dt-bindings/interrupt-controller/apple-aic.h
  F:    include/dt-bindings/pinctrl/apple.h
@@@ -8644,7 -8647,6 +8647,6 @@@ F:      rust/kernel/net/phy/reg.r
  
  EXEC & BINFMT API, ELF
  M:    Kees Cook <[email protected]>
- R:    Eric Biederman <[email protected]>
  L:    [email protected]
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git 
for-next/execve
@@@ -9829,7 -9831,6 +9831,6 @@@ S:      Maintaine
  F:    drivers/media/usb/go7007/
  
  GOODIX TOUCHSCREEN
- M:    Bastien Nocera <[email protected]>
  M:    Hans de Goede <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -11141,7 -11142,7 +11142,7 @@@ S:   Maintaine
  F:    drivers/i2c/busses/i2c-icy.c
  
  IDEAPAD LAPTOP EXTRAS DRIVER
- M:    Ike Panhc <[email protected]>
+ M:    Ike Panhc <[email protected]>
  L:    [email protected]
  S:    Maintained
  W:    http://launchpad.net/ideapad-laptop
@@@ -11872,7 -11873,6 +11873,7 @@@ L:   [email protected]
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-platform-intel-pmc
  F:    drivers/platform/x86/intel/pmc/
 +F:    linux/platform_data/x86/intel_pmc_ipc.h
  
  INTEL PMIC GPIO DRIVERS
  M:    Andy Shevchenko <[email protected]>
@@@ -12826,9 -12826,7 +12827,7 @@@ F:   fs/kernfs
  F:    include/linux/kernfs.h
  
  KEXEC
- M:    Eric Biederman <[email protected]>
  L:    [email protected]
- S:    Maintained
  W:    http://kernel.org/pub/linux/utils/kernel/kexec/
  F:    include/linux/kexec.h
  F:    include/uapi/linux/kexec.h
@@@ -13754,12 -13752,10 +13753,10 @@@ F:        drivers/hwmon/ltc4282.
  
  LTC4286 HARDWARE MONITOR DRIVER
  M:    Delphine CC Chiu <[email protected]>
- L:    [email protected]
+ L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/hwmon/lltc,ltc4286.yaml
  F:    Documentation/hwmon/ltc4286.rst
- F:    drivers/hwmon/pmbus/Kconfig
- F:    drivers/hwmon/pmbus/Makefile
  F:    drivers/hwmon/pmbus/ltc4286.c
  
  LTC4306 I2C MULTIPLEXER DRIVER
@@@ -13907,7 -13903,6 +13904,7 @@@ L:   [email protected]
  S:    Maintained
  F:    Documentation/networking/mctp.rst
  F:    drivers/net/mctp/
 +F:    include/linux/usb/mctp-usb.h
  F:    include/net/mctp.h
  F:    include/net/mctpdevice.h
  F:    include/net/netns/mctp.h
@@@ -16665,6 -16660,17 +16662,17 @@@ F: net/mptcp
  F:    tools/testing/selftests/bpf/*/*mptcp*.[ch]
  F:    tools/testing/selftests/net/mptcp/
  
+ NETWORKING [SRv6]
+ M:    Andrea Mayer <[email protected]>
+ L:    [email protected]
+ S:    Maintained
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+ F:    include/linux/seg6*
+ F:    include/net/seg6*
+ F:    include/uapi/linux/seg6*
+ F:    net/ipv6/seg6*
+ F:    tools/testing/selftests/net/srv6*
+ 
  NETWORKING [TCP]
  M:    Eric Dumazet <[email protected]>
  M:    Neal Cardwell <[email protected]>
@@@ -24191,7 -24197,7 +24199,7 @@@ W:   http://vtun.sourceforge.net/tu
  F:    Documentation/networking/tuntap.rst
  F:    arch/um/os-Linux/drivers/
  F:    drivers/net/tap.c
 -F:    drivers/net/tun.c
 +F:    drivers/net/tun*
  
  TURBOCHANNEL SUBSYSTEM
  M:    "Maciej W. Rozycki" <[email protected]>
diff --combined drivers/dpll/dpll_core.c
index 940c26b9dd53c,1877201d1aa9f..20bdc52f63a50
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@@ -443,11 -443,8 +443,11 @@@ static void dpll_pin_prop_free(struct d
  static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
                             struct dpll_pin_properties *dst)
  {
 +      if (WARN_ON(src->freq_supported && !src->freq_supported_num))
 +              return -EINVAL;
 +
        memcpy(dst, src, sizeof(*dst));
 -      if (src->freq_supported && src->freq_supported_num) {
 +      if (src->freq_supported) {
                size_t freq_size = src->freq_supported_num *
                                   sizeof(*src->freq_supported);
                dst->freq_supported = kmemdup(src->freq_supported,
@@@ -508,7 -505,7 +508,7 @@@ dpll_pin_alloc(u64 clock_id, u32 pin_id
        xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
        ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
                              &dpll_pin_xa_id, GFP_KERNEL);
-       if (ret)
+       if (ret < 0)
                goto err_xa_alloc;
        return pin;
  err_xa_alloc:
diff --combined drivers/net/can/flexcan/flexcan-core.c
index 7588cb54a909a,b080740bcb104..6d80c341b26fd
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/pinctrl/consumer.h>
  #include <linux/platform_device.h>
  #include <linux/can/platform/flexcan.h>
 +#include <linux/phy/phy.h>
  #include <linux/pm_runtime.h>
  #include <linux/property.h>
  #include <linux/regmap.h>
@@@ -387,16 -386,6 +387,16 @@@ static const struct flexcan_devtype_dat
                FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
  };
  
 +static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
 +      .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
 +              FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
 +              FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
 +              FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
 +              FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
 +              FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
 +              FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
 +};
 +
  static const struct can_bittiming_const flexcan_bittiming_const = {
        .name = DRV_NAME,
        .tseg1_min = 4,
@@@ -645,22 -634,18 +645,22 @@@ static void flexcan_clks_disable(const 
  
  static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
  {
 -      if (!priv->reg_xceiver)
 -              return 0;
 +      if (priv->reg_xceiver)
 +              return regulator_enable(priv->reg_xceiver);
 +      else if (priv->transceiver)
 +              return phy_power_on(priv->transceiver);
  
 -      return regulator_enable(priv->reg_xceiver);
 +      return 0;
  }
  
  static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
  {
 -      if (!priv->reg_xceiver)
 -              return 0;
 +      if (priv->reg_xceiver)
 +              return regulator_disable(priv->reg_xceiver);
 +      else if (priv->transceiver)
 +              return phy_power_off(priv->transceiver);
  
 -      return regulator_disable(priv->reg_xceiver);
 +      return 0;
  }
  
  static int flexcan_chip_enable(struct flexcan_priv *priv)
@@@ -1777,25 -1762,14 +1777,25 @@@ static int flexcan_open(struct net_devi
                        goto out_free_irq_boff;
        }
  
 +      if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
 +              err = request_irq(priv->irq_secondary_mb,
 +                                flexcan_irq, IRQF_SHARED, dev->name, dev);
 +              if (err)
 +                      goto out_free_irq_err;
 +      }
 +
        flexcan_chip_interrupts_enable(dev);
  
        netif_start_queue(dev);
  
        return 0;
  
 + out_free_irq_err:
 +      if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
 +              free_irq(priv->irq_err, dev);
   out_free_irq_boff:
 -      free_irq(priv->irq_boff, dev);
 +      if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
 +              free_irq(priv->irq_boff, dev);
   out_free_irq:
        free_irq(dev->irq, dev);
   out_can_rx_offload_disable:
@@@ -1820,9 -1794,6 +1820,9 @@@ static int flexcan_close(struct net_dev
        netif_stop_queue(dev);
        flexcan_chip_interrupts_disable(dev);
  
 +      if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
 +              free_irq(priv->irq_secondary_mb, dev);
 +
        if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
                free_irq(priv->irq_err, dev);
                free_irq(priv->irq_boff, dev);
@@@ -2070,7 -2041,6 +2070,7 @@@ static const struct of_device_id flexca
        { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
        { .compatible = "fsl,ls1021ar2-flexcan", .data = 
&fsl_ls1021a_r2_devtype_data, },
        { .compatible = "fsl,lx2160ar1-flexcan", .data = 
&fsl_lx2160a_r1_devtype_data, },
 +      { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
        { /* sentinel */ },
  };
  MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@@ -2091,7 -2061,6 +2091,7 @@@ static int flexcan_probe(struct platfor
        struct net_device *dev;
        struct flexcan_priv *priv;
        struct regulator *reg_xceiver;
 +      struct phy *transceiver;
        struct clk *clk_ipg = NULL, *clk_per = NULL;
        struct flexcan_regs __iomem *regs;
        struct flexcan_platform_data *pdata;
@@@ -2107,11 -2076,6 +2107,11 @@@
        else if (IS_ERR(reg_xceiver))
                return PTR_ERR(reg_xceiver);
  
 +      transceiver = devm_phy_optional_get(&pdev->dev, NULL);
 +      if (IS_ERR(transceiver))
 +              return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
 +                                   "failed to get phy\n");
 +
        if (pdev->dev.of_node) {
                of_property_read_u32(pdev->dev.of_node,
                                     "clock-frequency", &clock_freq);
@@@ -2209,10 -2173,6 +2209,10 @@@
        priv->clk_per = clk_per;
        priv->clk_src = clk_src;
        priv->reg_xceiver = reg_xceiver;
 +      priv->transceiver = transceiver;
 +
 +      if (transceiver)
 +              priv->can.bitrate_max = transceiver->attrs.max_link_rate;
  
        if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
                priv->irq_boff = platform_get_irq(pdev, 1);
@@@ -2227,14 -2187,6 +2227,14 @@@
                }
        }
  
 +      if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
 +              priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
 +              if (priv->irq_secondary_mb < 0) {
 +                      err = priv->irq_secondary_mb;
 +                      goto failed_platform_get_irq;
 +              }
 +      }
 +
        if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
                        CAN_CTRLMODE_FD_NON_ISO;
@@@ -2308,14 -2260,19 +2308,19 @@@ static int __maybe_unused flexcan_suspe
  
                        flexcan_chip_interrupts_disable(dev);
  
+                       err = flexcan_transceiver_disable(priv);
+                       if (err)
+                               return err;
+ 
                        err = pinctrl_pm_select_sleep_state(device);
                        if (err)
                                return err;
                }
                netif_stop_queue(dev);
                netif_device_detach(dev);
+ 
+               priv->can.state = CAN_STATE_SLEEPING;
        }
-       priv->can.state = CAN_STATE_SLEEPING;
  
        return 0;
  }
@@@ -2326,7 -2283,6 +2331,6 @@@ static int __maybe_unused flexcan_resum
        struct flexcan_priv *priv = netdev_priv(dev);
        int err;
  
-       priv->can.state = CAN_STATE_ERROR_ACTIVE;
        if (netif_running(dev)) {
                netif_device_attach(dev);
                netif_start_queue(dev);
@@@ -2340,12 -2296,20 +2344,20 @@@
                        if (err)
                                return err;
  
-                       err = flexcan_chip_start(dev);
+                       err = flexcan_transceiver_enable(priv);
                        if (err)
                                return err;
  
+                       err = flexcan_chip_start(dev);
+                       if (err) {
+                               flexcan_transceiver_disable(priv);
+                               return err;
+                       }
+ 
                        flexcan_chip_interrupts_enable(dev);
                }
+ 
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
        }
  
        return 0;
diff --combined drivers/net/ethernet/microsoft/mana/gdma_main.c
index f1966788c98e3,638ef64d639f3..62dfb6d1638c7
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@@ -134,9 -134,10 +134,10 @@@ static int mana_gd_detect_devices(struc
        struct gdma_list_devices_resp resp = {};
        struct gdma_general_req req = {};
        struct gdma_dev_id dev;
-       u32 i, max_num_devs;
+       int found_dev = 0;
        u16 dev_type;
        int err;
+       u32 i;
  
        mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
                             sizeof(resp));
@@@ -148,12 -149,17 +149,17 @@@
                return err ? err : -EPROTO;
        }
  
-       max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
- 
-       for (i = 0; i < max_num_devs; i++) {
+       for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+            found_dev < resp.num_of_devs; i++) {
                dev = resp.devs[i];
                dev_type = dev.type;
  
+               /* Skip empty devices */
+               if (dev.as_uint32 == 0)
+                       continue;
+ 
+               found_dev++;
+ 
                /* HWC is already detected in mana_hwc_create_channel(). */
                if (dev_type == GDMA_DEVICE_HWC)
                        continue;
@@@ -666,11 -672,8 +672,11 @@@ int mana_gd_create_hwc_queue(struct gdm
  
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
 -      if (err)
 +      if (err) {
 +              dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory 
allocation err: %d\n",
 +                      spec->type, spec->queue_size, err);
                goto free_q;
 +      }
  
        queue->head = 0;
        queue->tail = 0;
@@@ -691,8 -694,6 +697,8 @@@
        *queue_ptr = queue;
        return 0;
  out:
 +      dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
 +              spec->type, spec->queue_size, err);
        mana_gd_free_memory(gmi);
  free_q:
        kfree(queue);
@@@ -775,13 -776,7 +781,13 @@@ static int mana_gd_create_dma_region(st
        }
  
        gmi->dma_region_handle = resp.dma_region_handle;
 +      dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
 +              gmi->dma_region_handle);
  out:
 +      if (err)
 +              dev_dbg(gc->dev,
 +                      "Failed to create DMA region of length: %u, page_type: 
%d, status: 0x%x, err: %d\n",
 +                      length, req->gdma_page_type, resp.hdr.status, err);
        kfree(req);
        return err;
  }
@@@ -804,11 -799,8 +810,11 @@@ int mana_gd_create_mana_eq(struct gdma_
  
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
 -      if (err)
 +      if (err) {
 +              dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory 
allocation err: %d\n",
 +                      spec->type, spec->queue_size, err);
                goto free_q;
 +      }
  
        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
@@@ -829,8 -821,6 +835,8 @@@
        *queue_ptr = queue;
        return 0;
  out:
 +      dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: 
%d\n",
 +              spec->type, spec->queue_size, err);
        mana_gd_free_memory(gmi);
  free_q:
        kfree(queue);
@@@ -857,11 -847,8 +863,11 @@@ int mana_gd_create_mana_wq_cq(struct gd
  
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
 -      if (err)
 +      if (err) {
 +              dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory 
allocation err: %d\n",
 +                      spec->type, spec->queue_size, err);
                goto free_q;
 +      }
  
        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
@@@ -881,8 -868,6 +887,8 @@@
        *queue_ptr = queue;
        return 0;
  out:
 +      dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: 
%d\n",
 +              spec->type, spec->queue_size, err);
        mana_gd_free_memory(gmi);
  free_q:
        kfree(queue);
@@@ -1178,11 -1163,8 +1184,11 @@@ int mana_gd_post_and_ring(struct gdma_q
        int err;
  
        err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
 -      if (err)
 +      if (err) {
 +              dev_err(gc->dev, "Failed to post work req from queue type %d of 
size %u (err=%d)\n",
 +                      queue->type, queue->queue_size, err);
                return err;
 +      }
  
        mana_gd_wq_ring_doorbell(gc, queue);
  
@@@ -1459,10 -1441,8 +1465,10 @@@ static int mana_gd_setup(struct pci_de
        mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
  
        err = mana_gd_setup_irqs(pdev);
 -      if (err)
 +      if (err) {
 +              dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
                return err;
 +      }
  
        err = mana_hwc_create_channel(gc);
        if (err)
@@@ -1480,14 -1460,12 +1486,14 @@@
        if (err)
                goto destroy_hwc;
  
 +      dev_dbg(&pdev->dev, "mana gdma setup successful\n");
        return 0;
  
  destroy_hwc:
        mana_hwc_destroy_channel(gc);
  remove_irq:
        mana_gd_remove_irqs(pdev);
 +      dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
        return err;
  }
  
@@@ -1498,7 -1476,6 +1504,7 @@@ static void mana_gd_cleanup(struct pci_
        mana_hwc_destroy_channel(gc);
  
        mana_gd_remove_irqs(pdev);
 +      dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
  }
  
  static bool mana_is_pf(unsigned short dev_id)
@@@ -1517,10 -1494,8 +1523,10 @@@ static int mana_gd_probe(struct pci_de
        BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
  
        err = pci_enable_device(pdev);
 -      if (err)
 +      if (err) {
 +              dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", 
err);
                return -ENXIO;
 +      }
  
        pci_set_master(pdev);
  
@@@ -1529,10 -1504,9 +1535,10 @@@
                goto disable_dev;
  
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 -      if (err)
 +      if (err) {
 +              dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
                goto release_region;
 -
 +      }
        dma_set_max_seg_size(&pdev->dev, UINT_MAX);
  
        err = -ENOMEM;
@@@ -1610,8 -1584,6 +1616,8 @@@ static void mana_gd_remove(struct pci_d
  
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 +
 +      dev_dbg(&pdev->dev, "mana gdma remove successful\n");
  }
  
  /* The 'state' parameter is not used. */
diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index f18fc4f183feb,b5a7e05ab7a7e..cd431f84f34f6
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@@ -29,21 -29,14 +29,21 @@@ struct tegra_eqos 
        void __iomem *regs;
  
        struct reset_control *rst;
 -      struct clk *clk_master;
        struct clk *clk_slave;
 -      struct clk *clk_tx;
 -      struct clk *clk_rx;
  
        struct gpio_desc *reset;
  };
  
 +static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
 +                                  const char *name)
 +{
 +      for (int i = 0; i < plat_dat->num_clks; i++)
 +              if (strcmp(plat_dat->clks[i].id, name) == 0)
 +                      return plat_dat->clks[i].clk;
 +
 +      return NULL;
 +}
 +
  static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
                                   struct plat_stmmacenet_data *plat_dat)
  {
@@@ -53,7 -46,9 +53,9 @@@
        u32 a_index = 0;
  
        if (!plat_dat->axi) {
-               plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+               plat_dat->axi = devm_kzalloc(&pdev->dev,
+                                            sizeof(struct stmmac_axi),
+                                            GFP_KERNEL);
  
                if (!plat_dat->axi)
                        return -ENOMEM;
@@@ -130,9 -125,49 +132,9 @@@ static int dwc_qos_probe(struct platfor
                         struct plat_stmmacenet_data *plat_dat,
                         struct stmmac_resources *stmmac_res)
  {
 -      int err;
 -
 -      plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
 -      if (IS_ERR(plat_dat->stmmac_clk)) {
 -              dev_err(&pdev->dev, "apb_pclk clock not found.\n");
 -              return PTR_ERR(plat_dat->stmmac_clk);
 -      }
 -
 -      err = clk_prepare_enable(plat_dat->stmmac_clk);
 -      if (err < 0) {
 -              dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
 -                      err);
 -              return err;
 -      }
 -
 -      plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
 -      if (IS_ERR(plat_dat->pclk)) {
 -              dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
 -              err = PTR_ERR(plat_dat->pclk);
 -              goto disable;
 -      }
 -
 -      err = clk_prepare_enable(plat_dat->pclk);
 -      if (err < 0) {
 -              dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
 -                      err);
 -              goto disable;
 -      }
 +      plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
  
        return 0;
 -
 -disable:
 -      clk_disable_unprepare(plat_dat->stmmac_clk);
 -      return err;
 -}
 -
 -static void dwc_qos_remove(struct platform_device *pdev)
 -{
 -      struct net_device *ndev = platform_get_drvdata(pdev);
 -      struct stmmac_priv *priv = netdev_priv(ndev);
 -
 -      clk_disable_unprepare(priv->plat->pclk);
 -      clk_disable_unprepare(priv->plat->stmmac_clk);
  }
  
  #define SDMEMCOMPPADCTRL 0x8800
@@@ -145,10 -180,11 +147,10 @@@
  #define AUTO_CAL_STATUS 0x880c
  #define  AUTO_CAL_STATUS_ACTIVE BIT(31)
  
 -static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int 
mode)
 +static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
  {
        struct tegra_eqos *eqos = priv;
        bool needs_calibration = false;
 -      long rate = 125000000;
        u32 value;
        int err;
  
@@@ -159,10 -195,11 +161,10 @@@
                fallthrough;
  
        case SPEED_10:
 -              rate = rgmii_clock(speed);
                break;
  
        default:
 -              dev_err(eqos->dev, "invalid speed %u\n", speed);
 +              dev_err(eqos->dev, "invalid speed %d\n", speed);
                break;
        }
  
@@@ -205,6 -242,10 +207,6 @@@
                value &= ~AUTO_CAL_CONFIG_ENABLE;
                writel(value, eqos->regs + AUTO_CAL_CONFIG);
        }
 -
 -      err = clk_set_rate(eqos->clk_tx, rate);
 -      if (err < 0)
 -              dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
  }
  
  static int tegra_eqos_init(struct platform_device *pdev, void *priv)
@@@ -222,7 -263,7 +224,7 @@@
  }
  
  static int tegra_eqos_probe(struct platform_device *pdev,
 -                          struct plat_stmmacenet_data *data,
 +                          struct plat_stmmacenet_data *plat_dat,
                            struct stmmac_resources *res)
  {
        struct device *dev = &pdev->dev;
@@@ -235,24 -276,63 +237,24 @@@
  
        eqos->dev = &pdev->dev;
        eqos->regs = res->addr;
 +      eqos->clk_slave = plat_dat->stmmac_clk;
  
        if (!is_of_node(dev->fwnode))
                goto bypass_clk_reset_gpio;
  
 -      eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
 -      if (IS_ERR(eqos->clk_master)) {
 -              err = PTR_ERR(eqos->clk_master);
 -              goto error;
 -      }
 -
 -      err = clk_prepare_enable(eqos->clk_master);
 -      if (err < 0)
 -              goto error;
 -
 -      eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
 -      if (IS_ERR(eqos->clk_slave)) {
 -              err = PTR_ERR(eqos->clk_slave);
 -              goto disable_master;
 -      }
 -
 -      data->stmmac_clk = eqos->clk_slave;
 -
 -      err = clk_prepare_enable(eqos->clk_slave);
 -      if (err < 0)
 -              goto disable_master;
 -
 -      eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
 -      if (IS_ERR(eqos->clk_rx)) {
 -              err = PTR_ERR(eqos->clk_rx);
 -              goto disable_slave;
 -      }
 -
 -      err = clk_prepare_enable(eqos->clk_rx);
 -      if (err < 0)
 -              goto disable_slave;
 -
 -      eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
 -      if (IS_ERR(eqos->clk_tx)) {
 -              err = PTR_ERR(eqos->clk_tx);
 -              goto disable_rx;
 -      }
 -
 -      err = clk_prepare_enable(eqos->clk_tx);
 -      if (err < 0)
 -              goto disable_rx;
 +      plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
  
        eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
        if (IS_ERR(eqos->reset)) {
                err = PTR_ERR(eqos->reset);
 -              goto disable_tx;
 +              return err;
        }
  
        usleep_range(2000, 4000);
        gpiod_set_value(eqos->reset, 0);
  
        /* MDIO bus was already reset just above */
 -      data->mdio_bus_data->needs_reset = false;
 +      plat_dat->mdio_bus_data->needs_reset = false;
  
        eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
        if (IS_ERR(eqos->rst)) {
@@@ -273,11 -353,10 +275,11 @@@
        usleep_range(2000, 4000);
  
  bypass_clk_reset_gpio:
 -      data->fix_mac_speed = tegra_eqos_fix_speed;
 -      data->init = tegra_eqos_init;
 -      data->bsp_priv = eqos;
 -      data->flags |= STMMAC_FLAG_SPH_DISABLE;
 +      plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
 +      plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 +      plat_dat->init = tegra_eqos_init;
 +      plat_dat->bsp_priv = eqos;
 +      plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
  
        err = tegra_eqos_init(pdev, eqos);
        if (err < 0)
@@@ -288,7 -367,15 +290,7 @@@ reset
        reset_control_assert(eqos->rst);
  reset_phy:
        gpiod_set_value(eqos->reset, 1);
 -disable_tx:
 -      clk_disable_unprepare(eqos->clk_tx);
 -disable_rx:
 -      clk_disable_unprepare(eqos->clk_rx);
 -disable_slave:
 -      clk_disable_unprepare(eqos->clk_slave);
 -disable_master:
 -      clk_disable_unprepare(eqos->clk_master);
 -error:
 +
        return err;
  }
  
@@@ -298,29 -385,27 +300,29 @@@ static void tegra_eqos_remove(struct pl
  
        reset_control_assert(eqos->rst);
        gpiod_set_value(eqos->reset, 1);
 -      clk_disable_unprepare(eqos->clk_tx);
 -      clk_disable_unprepare(eqos->clk_rx);
 -      clk_disable_unprepare(eqos->clk_slave);
 -      clk_disable_unprepare(eqos->clk_master);
  }
  
  struct dwc_eth_dwmac_data {
        int (*probe)(struct platform_device *pdev,
 -                   struct plat_stmmacenet_data *data,
 +                   struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res);
        void (*remove)(struct platform_device *pdev);
 +      const char *stmmac_clk_name;
  };
  
  static const struct dwc_eth_dwmac_data dwc_qos_data = {
        .probe = dwc_qos_probe,
 -      .remove = dwc_qos_remove,
 +      .stmmac_clk_name = "apb_pclk",
  };
  
  static const struct dwc_eth_dwmac_data tegra_eqos_data = {
        .probe = tegra_eqos_probe,
        .remove = tegra_eqos_remove,
 +      .stmmac_clk_name = "slave_bus",
 +};
 +
 +static const struct dwc_eth_dwmac_data fsd_eqos_data = {
 +      .stmmac_clk_name = "slave_bus",
  };
  
  static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@@ -351,23 -436,9 +353,23 @@@
        if (IS_ERR(plat_dat))
                return PTR_ERR(plat_dat);
  
 -      ret = data->probe(pdev, plat_dat, &stmmac_res);
 +      ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
 +      if (ret < 0)
 +              return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all 
required clocks\n");
 +      plat_dat->num_clks = ret;
 +
 +      ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
 +      if (ret)
 +              return dev_err_probe(&pdev->dev, ret, "Failed to enable 
clocks\n");
 +
 +      plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
 +                                              data->stmmac_clk_name);
 +
 +      if (data->probe)
 +              ret = data->probe(pdev, plat_dat, &stmmac_res);
        if (ret < 0) {
                dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
 +              clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
                return ret;
        }
  
@@@ -382,8 -453,7 +384,8 @@@
        return ret;
  
  remove:
 -      data->remove(pdev);
 +      if (data->remove)
 +              data->remove(pdev);
  
        return ret;
  }
@@@ -391,21 -461,15 +393,21 @@@
  static void dwc_eth_dwmac_remove(struct platform_device *pdev)
  {
        const struct dwc_eth_dwmac_data *data = 
device_get_match_data(&pdev->dev);
 +      struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
  
        stmmac_dvr_remove(&pdev->dev);
  
 -      data->remove(pdev);
 +      if (data->remove)
 +              data->remove(pdev);
 +
 +      if (plat_dat)
 +              clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
  }
  
  static const struct of_device_id dwc_eth_dwmac_match[] = {
        { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
        { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
 +      { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
        { }
  };
  MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
diff --combined drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 3e671be95d6f6,bef734c6e5c2b..8b202e8e10e0a
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@@ -164,7 -164,6 +164,7 @@@
  #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
  
  /* XDP */
 +#define AM65_CPSW_XDP_TX       BIT(2)
  #define AM65_CPSW_XDP_CONSUMED BIT(1)
  #define AM65_CPSW_XDP_REDIRECT BIT(0)
  #define AM65_CPSW_XDP_PASS     0
@@@ -830,19 -829,19 +830,19 @@@ static void am65_cpsw_nuss_tx_cleanup(v
  {
        struct am65_cpsw_tx_chn *tx_chn = data;
        enum am65_cpsw_tx_buf_type buf_type;
 +      struct am65_cpsw_tx_swdata *swdata;
        struct cppi5_host_desc_t *desc_tx;
        struct xdp_frame *xdpf;
        struct sk_buff *skb;
 -      void **swdata;
  
        desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
        swdata = cppi5_hdesc_get_swdata(desc_tx);
        buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
        if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
 -              skb = *(swdata);
 +              skb = swdata->skb;
                dev_kfree_skb_any(skb);
        } else {
 -              xdpf = *(swdata);
 +              xdpf = swdata->xdpf;
                xdp_return_frame(xdpf);
        }
  
@@@ -1099,10 -1098,10 +1099,10 @@@ static int am65_cpsw_xdp_tx_frame(struc
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct cppi5_host_desc_t *host_desc;
 +      struct am65_cpsw_tx_swdata *swdata;
        struct netdev_queue *netif_txq;
        dma_addr_t dma_desc, dma_buf;
        u32 pkt_len = xdpf->len;
 -      void **swdata;
        int ret;
  
        host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@@ -1132,8 -1131,7 +1132,8 @@@
        cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
  
        swdata = cppi5_hdesc_get_swdata(host_desc);
 -      *(swdata) = xdpf;
 +      swdata->ndev = ndev;
 +      swdata->xdpf = xdpf;
  
        /* Report BQL before sending the packet */
        netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@@ -1169,16 -1167,17 +1169,16 @@@ pool_free
  
  static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
                             struct am65_cpsw_port *port,
 -                           struct xdp_buff *xdp,
 -                           int cpu, int *len)
 +                           struct xdp_buff *xdp, int *len)
  {
        struct am65_cpsw_common *common = flow->common;
        struct net_device *ndev = port->ndev;
        int ret = AM65_CPSW_XDP_CONSUMED;
        struct am65_cpsw_tx_chn *tx_chn;
        struct netdev_queue *netif_txq;
 +      int cpu = smp_processor_id();
        struct xdp_frame *xdpf;
        struct bpf_prog *prog;
 -      struct page *page;
        int pkt_len;
        u32 act;
        int err;
@@@ -1194,7 -1193,8 +1194,7 @@@
  
        switch (act) {
        case XDP_PASS:
 -              ret = AM65_CPSW_XDP_PASS;
 -              goto out;
 +              return AM65_CPSW_XDP_PASS;
        case XDP_TX:
                tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
                netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@@ -1213,13 -1213,15 +1213,13 @@@
                        goto drop;
  
                dev_sw_netstats_rx_add(ndev, pkt_len);
 -              ret = AM65_CPSW_XDP_CONSUMED;
 -              goto out;
 +              return AM65_CPSW_XDP_TX;
        case XDP_REDIRECT:
                if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
                        goto drop;
  
                dev_sw_netstats_rx_add(ndev, pkt_len);
 -              ret = AM65_CPSW_XDP_REDIRECT;
 -              goto out;
 +              return AM65_CPSW_XDP_REDIRECT;
        default:
                bpf_warn_invalid_xdp_action(ndev, prog, act);
                fallthrough;
@@@ -1231,6 -1233,10 +1231,6 @@@ drop
                ndev->stats.rx_dropped++;
        }
  
 -      page = virt_to_head_page(xdp->data);
 -      am65_cpsw_put_page(flow, page, true);
 -
 -out:
        return ret;
  }
  
@@@ -1268,7 -1274,7 +1268,7 @@@ static void am65_cpsw_nuss_rx_csum(stru
  }
  
  static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 -                                   int cpu, int *xdp_state)
 +                                   int *xdp_state)
  {
        struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
        u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@@ -1328,13 -1334,8 +1328,13 @@@
                xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
                xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
                                 pkt_len, false);
 -              *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
 -                                             cpu, &pkt_len);
 +              *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
 +              if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
 +                      page = virt_to_head_page(xdp.data);
 +                      am65_cpsw_put_page(flow, page, true);
 +                      goto allocate;
 +              }
 +
                if (*xdp_state != AM65_CPSW_XDP_PASS)
                        goto allocate;
  
@@@ -1400,6 -1401,7 +1400,6 @@@ static int am65_cpsw_nuss_rx_poll(struc
  {
        struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
        struct am65_cpsw_common *common = flow->common;
 -      int cpu = smp_processor_id();
        int xdp_state_or = 0;
        int cur_budget, ret;
        int xdp_state;
@@@ -1408,7 -1410,7 +1408,7 @@@
        /* process only this flow */
        cur_budget = budget;
        while (cur_budget--) {
 -              ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
 +              ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
                xdp_state_or |= xdp_state;
                if (ret)
                        break;
@@@ -1436,6 -1438,52 +1436,6 @@@
        return num_rx;
  }
  
 -static struct sk_buff *
 -am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
 -                                 dma_addr_t desc_dma)
 -{
 -      struct cppi5_host_desc_t *desc_tx;
 -      struct sk_buff *skb;
 -      void **swdata;
 -
 -      desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
 -                                           desc_dma);
 -      swdata = cppi5_hdesc_get_swdata(desc_tx);
 -      skb = *(swdata);
 -      am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
 -
 -      am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
 -
 -      dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
 -
 -      return skb;
 -}
 -
 -static struct xdp_frame *
 -am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
 -                                 struct am65_cpsw_tx_chn *tx_chn,
 -                                 dma_addr_t desc_dma,
 -                                 struct net_device **ndev)
 -{
 -      struct cppi5_host_desc_t *desc_tx;
 -      struct am65_cpsw_port *port;
 -      struct xdp_frame *xdpf;
 -      u32 port_id = 0;
 -      void **swdata;
 -
 -      desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
 -      cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
 -      swdata = cppi5_hdesc_get_swdata(desc_tx);
 -      xdpf = *(swdata);
 -      am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
 -
 -      port = am65_common_get_port(common, port_id);
 -      dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
 -      *ndev = port->ndev;
 -
 -      return xdpf;
 -}
 -
  static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct 
net_device *ndev,
                                   struct netdev_queue *netif_txq)
  {
@@@ -1456,17 -1504,13 +1456,17 @@@
  static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
                                           int chn, unsigned int budget, bool 
*tdown)
  {
 +      bool single_port = AM65_CPSW_IS_CPSW2G(common);
        enum am65_cpsw_tx_buf_type buf_type;
 +      struct am65_cpsw_tx_swdata *swdata;
 +      struct cppi5_host_desc_t *desc_tx;
        struct device *dev = common->dev;
        struct am65_cpsw_tx_chn *tx_chn;
        struct netdev_queue *netif_txq;
        unsigned int total_bytes = 0;
        struct net_device *ndev;
        struct xdp_frame *xdpf;
 +      unsigned int pkt_len;
        struct sk_buff *skb;
        dma_addr_t desc_dma;
        int res, num_tx = 0;
@@@ -1474,12 -1518,9 +1474,12 @@@
        tx_chn = &common->tx_chns[chn];
  
        while (true) {
 -              spin_lock(&tx_chn->lock);
 +              if (!single_port)
 +                      spin_lock(&tx_chn->lock);
                res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
 -              spin_unlock(&tx_chn->lock);
 +              if (!single_port)
 +                      spin_unlock(&tx_chn->lock);
 +
                if (res == -ENODATA)
                        break;
  
@@@ -1490,43 -1531,27 +1490,43 @@@
                        break;
                }
  
 +              desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
 +                                                   desc_dma);
 +              swdata = cppi5_hdesc_get_swdata(desc_tx);
 +              ndev = swdata->ndev;
                buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
                if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
 -                      skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, 
desc_dma);
 -                      ndev = skb->dev;
 -                      total_bytes = skb->len;
 +                      skb = swdata->skb;
 +                      am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
 +                      pkt_len = skb->len;
                        napi_consume_skb(skb, budget);
                } else {
 -                      xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, 
tx_chn,
 -                                                                desc_dma, 
&ndev);
 -                      total_bytes = xdpf->len;
 +                      xdpf = swdata->xdpf;
 +                      pkt_len = xdpf->len;
                        if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
                                xdp_return_frame_rx_napi(xdpf);
                        else
                                xdp_return_frame(xdpf);
                }
 +
 +              total_bytes += pkt_len;
                num_tx++;
 +              am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
 +              dev_sw_netstats_tx_add(ndev, 1, pkt_len);
 +              if (!single_port) {
 +                      /* as packets from multi ports can be interleaved
 +                       * on the same channel, we have to figure out the
 +                       * port/queue at every packet and report it/wake queue.
 +                       */
 +                      netif_txq = netdev_get_tx_queue(ndev, chn);
 +                      netdev_tx_completed_queue(netif_txq, 1, pkt_len);
 +                      am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
 +              }
 +      }
  
 +      if (single_port) {
                netif_txq = netdev_get_tx_queue(ndev, chn);
 -
                netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
 -
                am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
        }
  
@@@ -1535,6 -1560,66 +1535,6 @@@
        return num_tx;
  }
  
 -static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
 -                                            int chn, unsigned int budget, 
bool *tdown)
 -{
 -      enum am65_cpsw_tx_buf_type buf_type;
 -      struct device *dev = common->dev;
 -      struct am65_cpsw_tx_chn *tx_chn;
 -      struct netdev_queue *netif_txq;
 -      unsigned int total_bytes = 0;
 -      struct net_device *ndev;
 -      struct xdp_frame *xdpf;
 -      struct sk_buff *skb;
 -      dma_addr_t desc_dma;
 -      int res, num_tx = 0;
 -
 -      tx_chn = &common->tx_chns[chn];
 -
 -      while (true) {
 -              res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
 -              if (res == -ENODATA)
 -                      break;
 -
 -              if (cppi5_desc_is_tdcm(desc_dma)) {
 -                      if (atomic_dec_and_test(&common->tdown_cnt))
 -                              complete(&common->tdown_complete);
 -                      *tdown = true;
 -                      break;
 -              }
 -
 -              buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
 -              if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
 -                      skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, 
desc_dma);
 -                      ndev = skb->dev;
 -                      total_bytes += skb->len;
 -                      napi_consume_skb(skb, budget);
 -              } else {
 -                      xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, 
tx_chn,
 -                                                                desc_dma, 
&ndev);
 -                      total_bytes += xdpf->len;
 -                      if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
 -                              xdp_return_frame_rx_napi(xdpf);
 -                      else
 -                              xdp_return_frame(xdpf);
 -              }
 -              num_tx++;
 -      }
 -
 -      if (!num_tx)
 -              return 0;
 -
 -      netif_txq = netdev_get_tx_queue(ndev, chn);
 -
 -      netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
 -
 -      am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
 -
 -      dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
 -
 -      return num_tx;
 -}
 -
  static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer 
*timer)
  {
        struct am65_cpsw_tx_chn *tx_chns =
@@@ -1550,8 -1635,13 +1550,8 @@@ static int am65_cpsw_nuss_tx_poll(struc
        bool tdown = false;
        int num_tx;
  
 -      if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
 -              num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, 
tx_chn->id,
 -                                                          budget, &tdown);
 -      else
 -              num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
 -                                                       tx_chn->id, budget, 
&tdown);
 -
 +      num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
 +                                               tx_chn->id, budget, &tdown);
        if (num_tx >= budget)
                return budget;
  
@@@ -1595,12 -1685,12 +1595,12 @@@ static netdev_tx_t am65_cpsw_nuss_ndo_s
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
        struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 +      struct am65_cpsw_tx_swdata *swdata;
        struct device *dev = common->dev;
        struct am65_cpsw_tx_chn *tx_chn;
        struct netdev_queue *netif_txq;
        dma_addr_t desc_dma, buf_dma;
        int ret, q_idx, i;
 -      void **swdata;
        u32 *psdata;
        u32 pkt_len;
  
@@@ -1646,8 -1736,7 +1646,8 @@@
        k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
        cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
        swdata = cppi5_hdesc_get_swdata(first_desc);
 -      *(swdata) = skb;
 +      swdata->ndev = ndev;
 +      swdata->skb = skb;
        psdata = cppi5_hdesc_get_psdata(first_desc);
  
        /* HW csum offload if enabled */
@@@ -2217,14 -2306,18 +2217,18 @@@ static void am65_cpsw_nuss_remove_tx_ch
  static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
  {
        struct device *dev = common->dev;
+       struct am65_cpsw_tx_chn *tx_chn;
        int i, ret = 0;
  
        for (i = 0; i < common->tx_ch_num; i++) {
-               struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+               tx_chn = &common->tx_chns[i];
  
                hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, 
HRTIMER_MODE_REL_PINNED);
                tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
  
+               netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+                                 am65_cpsw_nuss_tx_poll);
+ 
                ret = devm_request_irq(dev, tx_chn->irq,
                                       am65_cpsw_nuss_tx_irq,
                                       IRQF_TRIGGER_HIGH,
@@@ -2234,19 -2327,16 +2238,16 @@@
                                tx_chn->id, tx_chn->irq, ret);
                        goto err;
                }
- 
-               netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
-                                 am65_cpsw_nuss_tx_poll);
        }
  
        return 0;
  
  err:
-       for (--i ; i >= 0 ; i--) {
-               struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- 
-               netif_napi_del(&tx_chn->napi_tx);
+       netif_napi_del(&tx_chn->napi_tx);
+       for (--i; i >= 0; i--) {
+               tx_chn = &common->tx_chns[i];
                devm_free_irq(dev, tx_chn->irq, tx_chn);
+               netif_napi_del(&tx_chn->napi_tx);
        }
  
        return ret;
@@@ -2480,6 -2570,9 +2481,9 @@@ static int am65_cpsw_nuss_init_rx_chns(
                             HRTIMER_MODE_REL_PINNED);
                flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
  
+               netif_napi_add(common->dma_ndev, &flow->napi_rx,
+                              am65_cpsw_nuss_rx_poll);
+ 
                ret = devm_request_irq(dev, flow->irq,
                                       am65_cpsw_nuss_rx_irq,
                                       IRQF_TRIGGER_HIGH,
@@@ -2488,11 -2581,8 +2492,8 @@@
                        dev_err(dev, "failure requesting rx %d irq %u, %d\n",
                                i, flow->irq, ret);
                        flow->irq = -EINVAL;
-                       goto err_flow;
+                       goto err_request_irq;
                }
- 
-               netif_napi_add(common->dma_ndev, &flow->napi_rx,
-                              am65_cpsw_nuss_rx_poll);
        }
  
        /* setup classifier to route priorities to flows */
@@@ -2500,11 -2590,14 +2501,14 @@@
  
        return 0;
  
+ err_request_irq:
+       netif_napi_del(&flow->napi_rx);
+ 
  err_flow:
-       for (--i; i >= 0 ; i--) {
+       for (--i; i >= 0; i--) {
                flow = &rx_chn->flows[i];
-               netif_napi_del(&flow->napi_rx);
                devm_free_irq(dev, flow->irq, flow);
+               netif_napi_del(&flow->napi_rx);
        }
  
  err:
@@@ -3489,10 -3582,6 +3493,10 @@@ static int am65_cpsw_nuss_probe(struct 
        __be64 id_temp;
        int ret, i;
  
 +      BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > 
AM65_CPSW_NAV_SW_DATA_SIZE,
 +                       "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
 +      BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > 
AM65_CPSW_NAV_SW_DATA_SIZE,
 +                       "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
        common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
        if (!common)
                return -ENOMEM;
diff --combined drivers/net/ethernet/ti/icssg/icssg_prueth.c
index c4a995630eca9,9a75733e3f8fb..742f9803026a2
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@@ -559,33 -559,6 +559,33 @@@ const struct icss_iep_clockops prueth_i
        .perout_enable = prueth_perout_enable,
  };
  
 +static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
 +{
 +      struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
 +      struct page_pool *pool = emac->rx_chns.pg_pool;
 +      int ret;
 +
 +      ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
 +      if (ret)
 +              return ret;
 +
 +      ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
 +      if (ret)
 +              xdp_rxq_info_unreg(rxq);
 +
 +      return ret;
 +}
 +
 +static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
 +{
 +      struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
 +
 +      if (!xdp_rxq_info_is_reg(rxq))
 +              return;
 +
 +      xdp_rxq_info_unreg(rxq);
 +}
 +
  static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
  {
        struct net_device *real_dev;
@@@ -807,14 -780,10 +807,14 @@@ static int emac_ndo_open(struct net_dev
        if (ret)
                goto free_tx_ts_irq;
  
 -      ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
 +      ret = prueth_create_xdp_rxqs(emac);
        if (ret)
                goto reset_rx_chn;
  
 +      ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
 +      if (ret)
 +              goto destroy_xdp_rxqs;
 +
        for (i = 0; i < emac->tx_ch_num; i++) {
                ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
                if (ret)
@@@ -840,8 -809,6 +840,8 @@@ reset_tx_chan
         * any SKB for completion. So set false to free_skb
         */
        prueth_reset_tx_chan(emac, i, false);
 +destroy_xdp_rxqs:
 +      prueth_destroy_xdp_rxqs(emac);
  reset_rx_chn:
        prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
  free_tx_ts_irq:
@@@ -912,7 -879,7 +912,7 @@@ static int emac_ndo_stop(struct net_dev
        k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
  
        prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
 -
 +      prueth_destroy_xdp_rxqs(emac);
        napi_disable(&emac->napi_rx);
        hrtimer_cancel(&emac->rx_hrtimer);
  
@@@ -1057,93 -1024,6 +1057,93 @@@ static int emac_ndo_vlan_rx_del_vid(str
        return 0;
  }
  
 +/**
 + * emac_xdp_xmit - Implements ndo_xdp_xmit
 + * @dev: netdev
 + * @n: number of frames
 + * @frames: array of XDP buffer pointers
 + * @flags: XDP extra info
 + *
 + * Return: number of frames successfully sent. Failed frames
 + * will be free'ed by XDP core.
 + *
 + * For error cases, a negative errno code is returned and no-frames
 + * are transmitted (caller must handle freeing frames).
 + **/
 +static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame 
**frames,
 +                       u32 flags)
 +{
 +      struct prueth_emac *emac = netdev_priv(dev);
 +      struct net_device *ndev = emac->ndev;
 +      struct xdp_frame *xdpf;
 +      unsigned int q_idx;
 +      int nxmit = 0;
 +      u32 err;
 +      int i;
 +
 +      q_idx = smp_processor_id() % emac->tx_ch_num;
 +
 +      if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 +              return -EINVAL;
 +
 +      for (i = 0; i < n; i++) {
 +              xdpf = frames[i];
 +              err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
 +              if (err != ICSSG_XDP_TX) {
 +                      ndev->stats.tx_dropped++;
 +                      break;
 +              }
 +              nxmit++;
 +      }
 +
 +      return nxmit;
 +}
 +
 +/**
 + * emac_xdp_setup - add/remove an XDP program
 + * @emac: emac device
 + * @bpf: XDP program
 + *
 + * Return: Always 0 (Success)
 + **/
 +static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
 +{
 +      struct bpf_prog *prog = bpf->prog;
 +      xdp_features_t val;
 +
 +      val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 +            NETDEV_XDP_ACT_NDO_XMIT;
 +      xdp_set_features_flag(emac->ndev, val);
 +
 +      if (!emac->xdpi.prog && !prog)
 +              return 0;
 +
 +      WRITE_ONCE(emac->xdp_prog, prog);
 +
 +      xdp_attachment_setup(&emac->xdpi, bpf);
 +
 +      return 0;
 +}
 +
 +/**
 + * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
 + * @ndev: network adapter device
 + * @bpf: XDP program
 + *
 + * Return: 0 on success, error code on failure.
 + **/
 +static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
 +{
 +      struct prueth_emac *emac = netdev_priv(ndev);
 +
 +      switch (bpf->command) {
 +      case XDP_SETUP_PROG:
 +              return emac_xdp_setup(emac, bpf);
 +      default:
 +              return -EINVAL;
 +      }
 +}
 +
  static const struct net_device_ops emac_netdev_ops = {
        .ndo_open = emac_ndo_open,
        .ndo_stop = emac_ndo_stop,
@@@ -1158,8 -1038,6 +1158,8 @@@
        .ndo_fix_features = emac_ndo_fix_features,
        .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
 +      .ndo_bpf = emac_ndo_bpf,
 +      .ndo_xdp_xmit = emac_xdp_xmit,
  };
  
  static int prueth_netdev_init(struct prueth *prueth,
@@@ -1188,8 -1066,6 +1188,8 @@@
        emac->prueth = prueth;
        emac->ndev = ndev;
        emac->port_id = port;
 +      emac->xdp_prog = NULL;
 +      emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
        emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
        if (!emac->cmd_wq) {
                ret = -ENOMEM;
@@@ -1646,9 -1522,6 +1646,9 @@@ static int prueth_probe(struct platform
  
        np = dev->of_node;
  
 +      BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > 
PRUETH_NAV_SW_DATA_SIZE),
 +                       "insufficient SW_DATA size");
 +
        prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
        if (!prueth)
                return -ENOMEM;
@@@ -1806,6 -1679,7 +1806,7 @@@
        }
  
        spin_lock_init(&prueth->vtbl_lock);
+       spin_lock_init(&prueth->stats_lock);
        /* setup netdev interfaces */
        if (eth0_node) {
                ret = prueth_netdev_init(prueth, eth0_node);
diff --combined drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 1dd76e2adfcd0,f41786b05741f..b6be4aa57a615
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@@ -8,8 -8,6 +8,8 @@@
  #ifndef __NET_TI_ICSSG_PRUETH_H
  #define __NET_TI_ICSSG_PRUETH_H
  
 +#include <linux/bpf.h>
 +#include <linux/bpf_trace.h>
  #include <linux/etherdevice.h>
  #include <linux/genalloc.h>
  #include <linux/if_vlan.h>
@@@ -35,8 -33,6 +35,8 @@@
  #include <linux/dma/k3-udma-glue.h>
  
  #include <net/devlink.h>
 +#include <net/xdp.h>
 +#include <net/page_pool/helpers.h>
  
  #include "icssg_config.h"
  #include "icss_iep.h"
@@@ -135,26 -131,6 +135,26 @@@ struct prueth_rx_chn 
        u32 descs_num;
        unsigned int irq[ICSSG_MAX_RFLOWS];     /* separate irq per flow */
        char name[32];
 +      struct page_pool *pg_pool;
 +      struct xdp_rxq_info xdp_rxq;
 +};
 +
 +enum prueth_swdata_type {
 +      PRUETH_SWDATA_INVALID = 0,
 +      PRUETH_SWDATA_SKB,
 +      PRUETH_SWDATA_PAGE,
 +      PRUETH_SWDATA_CMD,
 +      PRUETH_SWDATA_XDPF,
 +};
 +
 +struct prueth_swdata {
 +      enum prueth_swdata_type type;
 +      union prueth_data {
 +              struct sk_buff *skb;
 +              struct page *page;
 +              u32 cmd;
 +              struct xdp_frame *xdpf;
 +      } data;
  };
  
  /* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@@ -164,12 -140,6 +164,12 @@@
  
  #define PRUETH_MAX_TX_TS_REQUESTS     50 /* Max simultaneous TX_TS requests */
  
 +/* XDP BPF state */
 +#define ICSSG_XDP_PASS           0
 +#define ICSSG_XDP_CONSUMED       BIT(0)
 +#define ICSSG_XDP_TX             BIT(1)
 +#define ICSSG_XDP_REDIR          BIT(2)
 +
  /* Minimum coalesce time in usecs for both Tx and Rx */
  #define ICSSG_MIN_COALESCE_USECS 20
  
@@@ -238,14 -208,8 +238,14 @@@ struct prueth_emac 
        unsigned long rx_pace_timeout_ns;
  
        struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
 +      struct bpf_prog *xdp_prog;
 +      struct xdp_attachment_info xdpi;
  };
  
 +/* The buf includes headroom compatible with both skb and xdpf */
 +#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + 
NET_IP_ALIGN)
 +#define PRUETH_HEADROOM  ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
 +
  /**
   * struct prueth_pdata - PRUeth platform data
   * @fdqring_mode: Free desc queue mode
@@@ -341,6 -305,8 +341,8 @@@ struct prueth 
        int default_vlan;
        /** @vtbl_lock: Lock for vtbl in shared memory */
        spinlock_t vtbl_lock;
+       /** @stats_lock: Lock for reading icssg stats */
+       spinlock_t stats_lock;
  };
  
  struct emac_tx_ts_response {
@@@ -446,10 -412,9 +448,10 @@@ int prueth_init_rx_chns(struct prueth_e
                        struct prueth_rx_chn *rx_chn,
                        char *name, u32 max_rflows,
                        u32 max_desc_num);
 -int prueth_dma_rx_push(struct prueth_emac *emac,
 -                     struct sk_buff *skb,
 -                     struct prueth_rx_chn *rx_chn);
 +int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
 +                            struct prueth_rx_chn *rx_chn,
 +                            struct page *page, u32 buf_len);
 +unsigned int prueth_rxbuf_total_len(unsigned int len);
  void emac_rx_timestamp(struct prueth_emac *emac,
                       struct sk_buff *skb, u32 *psdata);
  enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device 
*ndev);
@@@ -478,9 -443,5 +480,9 @@@ void prueth_put_cores(struct prueth *pr
  
  /* Revision specific helper */
  u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
 +u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
 +                      struct xdp_frame *xdpf,
 +                      struct page *page,
 +                      unsigned int q_idx);
  
  #endif /* __NET_TI_ICSSG_PRUETH_H */
diff --combined net/batman-adv/bat_iv_ogm.c
index 7b4f659612a38,b12645949ae5a..458879d21d663
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@@ -23,7 -23,6 +23,7 @@@
  #include <linux/kref.h>
  #include <linux/list.h>
  #include <linux/lockdep.h>
 +#include <linux/minmax.h>
  #include <linux/mutex.h>
  #include <linux/netdevice.h>
  #include <linux/netlink.h>
@@@ -130,7 -129,7 +130,7 @@@ static u8 batadv_ring_buffer_avg(const 
  /**
   * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
   *  originator
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @addr: mac address of the originator
   *
   * Return: the originator object corresponding to the passed mac address or 
NULL
@@@ -326,15 -325,14 +326,14 @@@ batadv_iv_ogm_aggr_packet(int buff_pos
        /* check if there is enough space for the optional TVLV */
        next_buff_pos += ntohs(ogm_packet->tvlv_len);
  
-       return (next_buff_pos <= packet_len) &&
-              (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+       return next_buff_pos <= packet_len;
  }
  
  /* send a batman ogm to a given interface */
  static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                                     struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
        const char *fwd_str;
        u8 packet_num;
        s16 buff_pos;
@@@ -356,7 -354,7 +355,7 @@@
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
 -              if (forw_packet->direct_link_flags & BIT(packet_num) &&
 +              if (test_bit(packet_num, forw_packet->direct_link_flags) &&
                    forw_packet->if_incoming == hard_iface)
                        batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
                else
@@@ -397,20 -395,20 +396,20 @@@
  /* send a batman ogm packet */
  static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
  {
 -      struct net_device *soft_iface;
 +      struct net_device *mesh_iface;
  
        if (!forw_packet->if_incoming) {
                pr_err("Error - can't forward packet: incoming iface not 
specified\n");
                return;
        }
  
 -      soft_iface = forw_packet->if_incoming->soft_iface;
 +      mesh_iface = forw_packet->if_incoming->mesh_iface;
  
        if (WARN_ON(!forw_packet->if_outgoing))
                return;
  
 -      if (forw_packet->if_outgoing->soft_iface != soft_iface) {
 -              pr_warn("%s: soft interface switch for queued OGM\n", __func__);
 +      if (forw_packet->if_outgoing->mesh_iface != mesh_iface) {
 +              pr_warn("%s: mesh interface switch for queued OGM\n", __func__);
                return;
        }
  
@@@ -425,7 -423,7 +424,7 @@@
   * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
   *  existing forward packet
   * @new_bat_ogm_packet: OGM packet to be aggregated
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @packet_len: (total) length of the OGM
   * @send_time: timestamp (jiffies) when the packet is to be sent
   * @directlink: true if this is a direct link packet
@@@ -445,37 -443,28 +444,37 @@@ batadv_iv_ogm_can_aggregate(const struc
                            const struct batadv_forw_packet *forw_packet)
  {
        struct batadv_ogm_packet *batadv_ogm_packet;
 -      int aggregated_bytes = forw_packet->packet_len + packet_len;
 +      unsigned int aggregated_bytes = forw_packet->packet_len + packet_len;
        struct batadv_hard_iface *primary_if = NULL;
 +      u8 packet_num = forw_packet->num_packets;
        bool res = false;
        unsigned long aggregation_end_time;
 +      unsigned int max_bytes;
  
        batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
        aggregation_end_time = send_time;
        aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
  
 +      max_bytes = min_t(unsigned int, if_outgoing->net_dev->mtu,
 +                        BATADV_MAX_AGGREGATION_BYTES);
 +
        /* we can aggregate the current packet to this aggregated packet
         * if:
         *
         * - the send time is within our MAX_AGGREGATION_MS time
         * - the resulting packet won't be bigger than
 -       *   MAX_AGGREGATION_BYTES
 +       *   MAX_AGGREGATION_BYTES and MTU of the outgoing interface
 +       * - the number of packets is lower than MAX_AGGREGATION_PACKETS
         * otherwise aggregation is not possible
         */
        if (!time_before(send_time, forw_packet->send_time) ||
            !time_after_eq(aggregation_end_time, forw_packet->send_time))
                return false;
  
 -      if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
 +      if (aggregated_bytes > max_bytes)
 +              return false;
 +
 +      if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS)
                return false;
  
        /* packet is not leaving on the same interface. */
@@@ -550,16 -539,16 +549,16 @@@ static void batadv_iv_ogm_aggregate_new
                                        struct batadv_hard_iface *if_outgoing,
                                        int own_packet)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_forw_packet *forw_packet_aggr;
        struct sk_buff *skb;
        unsigned char *skb_buff;
        unsigned int skb_size;
        atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
  
 -      if (atomic_read(&bat_priv->aggregated_ogms) &&
 -          packet_len < BATADV_MAX_AGGREGATION_BYTES)
 -              skb_size = BATADV_MAX_AGGREGATION_BYTES;
 +      if (atomic_read(&bat_priv->aggregated_ogms))
 +              skb_size = max_t(unsigned int, BATADV_MAX_AGGREGATION_BYTES,
 +                               packet_len);
        else
                skb_size = packet_len;
  
@@@ -584,13 -573,12 +583,13 @@@
        memcpy(skb_buff, packet_buff, packet_len);
  
        forw_packet_aggr->own = own_packet;
 -      forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
 +      bitmap_zero(forw_packet_aggr->direct_link_flags,
 +                  BATADV_MAX_AGGREGATION_PACKETS);
        forw_packet_aggr->send_time = send_time;
  
        /* save packet direct link flag status */
        if (direct_link)
 -              forw_packet_aggr->direct_link_flags |= 1;
 +              set_bit(0, forw_packet_aggr->direct_link_flags);
  
        INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
                          batadv_iv_send_outstanding_bat_ogm_packet);
@@@ -603,20 -591,22 +602,20 @@@ static void batadv_iv_ogm_aggregate(str
                                    const unsigned char *packet_buff,
                                    int packet_len, bool direct_link)
  {
 -      unsigned long new_direct_link_flag;
 -
        skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len);
        forw_packet_aggr->packet_len += packet_len;
 -      forw_packet_aggr->num_packets++;
  
        /* save packet direct link flag status */
 -      if (direct_link) {
 -              new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
 -              forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
 -      }
 +      if (direct_link)
 +              set_bit(forw_packet_aggr->num_packets,
 +                      forw_packet_aggr->direct_link_flags);
 +
 +      forw_packet_aggr->num_packets++;
  }
  
  /**
   * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @packet_buff: pointer to the OGM
   * @packet_len: (total) length of the OGM
   * @if_incoming: interface where the packet was received
@@@ -695,7 -685,7 +694,7 @@@ static void batadv_iv_ogm_forward(struc
                                  struct batadv_hard_iface *if_incoming,
                                  struct batadv_hard_iface *if_outgoing)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        u16 tvlv_len;
  
        if (batadv_ogm_packet->ttl <= 1) {
@@@ -748,7 -738,7 +747,7 @@@
  static void
  batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
@@@ -787,7 -777,7 +786,7 @@@
   */
  static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
        unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if, *tmp_hard_iface;
@@@ -849,7 -839,7 +848,7 @@@
         */
        rcu_read_lock();
        list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
 -              if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
 +              if (tmp_hard_iface->mesh_iface != hard_iface->mesh_iface)
                        continue;
  
                if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
@@@ -910,7 -900,7 +909,7 @@@ static u8 batadv_iv_orig_ifinfo_sum(str
  /**
   * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
   *  originator
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @orig_node: the orig node who originally emitted the ogm packet
   * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node
   * @ethhdr: Ethernet header of the OGM
@@@ -1074,7 -1064,7 +1073,7 @@@ static bool batadv_iv_ogm_calc_tq(struc
                                  struct batadv_hard_iface *if_incoming,
                                  struct batadv_hard_iface *if_outgoing)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        u8 total_count;
@@@ -1216,7 -1206,7 +1215,7 @@@ batadv_iv_ogm_update_seqnos(const struc
                            const struct batadv_hard_iface *if_incoming,
                            struct batadv_hard_iface *if_outgoing)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo = NULL;
        struct batadv_neigh_node *neigh_node;
@@@ -1318,7 -1308,7 +1317,7 @@@ batadv_iv_ogm_process_per_outif(const s
                                struct batadv_hard_iface *if_incoming,
                                struct batadv_hard_iface *if_outgoing)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_hardif_neigh_node *hardif_neigh = NULL;
        struct batadv_neigh_node *router = NULL;
        struct batadv_neigh_node *router_router = NULL;
@@@ -1558,7 -1548,7 +1557,7 @@@ static void batadv_iv_ogm_process_reply
  static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                                  struct batadv_hard_iface *if_incoming)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_orig_node *orig_neigh_node, *orig_node;
        struct batadv_hard_iface *hard_iface;
        struct batadv_ogm_packet *ogm_packet;
@@@ -1608,7 -1598,7 +1607,7 @@@
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;
  
 -              if (hard_iface->soft_iface != if_incoming->soft_iface)
 +              if (hard_iface->mesh_iface != if_incoming->mesh_iface)
                        continue;
  
                if (batadv_compare_eth(ethhdr->h_source,
@@@ -1673,7 -1663,7 +1672,7 @@@
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;
  
 -              if (hard_iface->soft_iface != bat_priv->soft_iface)
 +              if (hard_iface->mesh_iface != bat_priv->mesh_iface)
                        continue;
  
                if (!kref_get_unless_zero(&hard_iface->refcount))
@@@ -1699,7 -1689,7 +1698,7 @@@ static void batadv_iv_send_outstanding_
        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
 -      bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
 +      bat_priv = netdev_priv(forw_packet->if_incoming->mesh_iface);
  
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
                dropped = true;
@@@ -1730,7 -1720,7 +1729,7 @@@ out
  static int batadv_iv_ogm_receive(struct sk_buff *skb,
                                 struct batadv_hard_iface *if_incoming)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_ogm_packet *ogm_packet;
        u8 *packet_pos;
        int ogm_offset;
@@@ -1809,7 -1799,7 +1808,7 @@@ batadv_iv_ogm_neigh_get_tq_avg(struct b
   * @msg: Netlink message to dump into
   * @portid: Port making netlink request
   * @seq: Sequence number of netlink message
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @if_outgoing: Limit dump to entries with this outgoing interface
   * @orig_node: Originator to dump
   * @neigh_node: Single hops neighbour
@@@ -1872,7 -1862,7 +1871,7 @@@ batadv_iv_ogm_orig_dump_subentry(struc
   * @msg: Netlink message to dump into
   * @portid: Port making netlink request
   * @seq: Sequence number of netlink message
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @if_outgoing: Limit dump to entries with this outgoing interface
   * @orig_node: Originator to dump
   * @sub_s: Number of sub entries to skip
@@@ -1934,7 -1924,7 +1933,7 @@@ batadv_iv_ogm_orig_dump_entry(struct sk
   * @msg: Netlink message to dump into
   * @portid: Port making netlink request
   * @seq: Sequence number of netlink message
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @if_outgoing: Limit dump to entries with this outgoing interface
   * @head: Bucket to be dumped
   * @idx_s: Number of entries to be skipped
@@@ -1975,7 -1965,7 +1974,7 @@@ batadv_iv_ogm_orig_dump_bucket(struct s
   * batadv_iv_ogm_orig_dump() - Dump the originators into a message
   * @msg: Netlink message to dump into
   * @cb: Control block containing additional options
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @if_outgoing: Limit dump to entries with this outgoing interface
   */
  static void
@@@ -2097,7 -2087,7 +2096,7 @@@ batadv_iv_ogm_neigh_dump_neigh(struct s
   * @msg: Netlink message to dump into
   * @portid: Port making netlink request
   * @seq: Sequence number of netlink message
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @hard_iface: Hard interface to dump the neighbours for
   * @idx_s: Number of entries to skip
   *
@@@ -2134,7 -2124,7 +2133,7 @@@ batadv_iv_ogm_neigh_dump_hardif(struct 
   * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
   * @msg: Netlink message to dump into
   * @cb: Control block containing additional options
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @single_hardif: Limit dump to this hard interface
   */
  static void
@@@ -2161,7 -2151,7 +2160,7 @@@ batadv_iv_ogm_neigh_dump(struct sk_buf
        } else {
                list_for_each_entry_rcu(hard_iface, &batadv_hardif_list,
                                        list) {
 -                      if (hard_iface->soft_iface != bat_priv->soft_iface)
 +                      if (hard_iface->mesh_iface != bat_priv->mesh_iface)
                                continue;
  
                        if (i_hardif++ < i_hardif_s)
@@@ -2245,7 -2235,7 +2244,7 @@@ static void batadv_iv_iface_enabled(str
  
  /**
   * batadv_iv_init_sel_class() - initialize GW selection class
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   */
  static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
  {
@@@ -2400,7 -2390,7 +2399,7 @@@ out
   * @msg: Netlink message to dump into
   * @portid: Port making netlink request
   * @cb: Control block containing additional options
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   * @gw_node: Gateway to be dumped
   *
   * Return: Error code, or 0 on success
@@@ -2475,7 -2465,7 +2474,7 @@@ out
   * batadv_iv_gw_dump() - Dump gateways into a message
   * @msg: Netlink message to dump into
   * @cb: Control block containing additional options
 - * @bat_priv: The bat priv with all the soft interface information
 + * @bat_priv: The bat priv with all the mesh interface information
   */
  static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback 
*cb,
                              struct batadv_priv *bat_priv)
diff --combined net/batman-adv/bat_v_ogm.c
index 3b9065a3c746a,8f89ffe6020ce..b86bb647da5b7
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@@ -45,7 -45,7 +45,7 @@@
  
  /**
   * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @addr: the address of the originator
   *
   * Return: the orig_node corresponding to the specified address. If such an
@@@ -96,7 -96,7 +96,7 @@@ static void batadv_v_ogm_start_queue_ti
  
  /**
   * batadv_v_ogm_start_timer() - restart the OGM sending timer
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   */
  static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
  {
@@@ -121,7 -121,7 +121,7 @@@
  static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
                                    struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
  
        if (hard_iface->if_status != BATADV_IF_ACTIVE) {
                kfree_skb(skb);
@@@ -239,7 -239,7 +239,7 @@@ static void batadv_v_ogm_aggr_send(stru
  static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
                                     struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
  
        if (!atomic_read(&bat_priv->aggregated_ogms)) {
                batadv_v_ogm_send_to_if(skb, hard_iface);
@@@ -256,10 -256,10 +256,10 @@@
  }
  
  /**
 - * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
 - * @bat_priv: the bat priv with all the soft interface information
 + * batadv_v_ogm_send_meshif() - periodic worker broadcasting the own OGM
 + * @bat_priv: the bat priv with all the mesh interface information
   */
 -static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 +static void batadv_v_ogm_send_meshif(struct batadv_priv *bat_priv)
  {
        struct batadv_hard_iface *hard_iface;
        struct batadv_ogm2_packet *ogm_packet;
@@@ -302,7 -302,7 +302,7 @@@
        /* broadcast on every interface */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
 -              if (hard_iface->soft_iface != bat_priv->soft_iface)
 +              if (hard_iface->mesh_iface != bat_priv->mesh_iface)
                        continue;
  
                if (!kref_get_unless_zero(&hard_iface->refcount))
@@@ -373,7 -373,7 +373,7 @@@ static void batadv_v_ogm_send(struct wo
        bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
  
        mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
 -      batadv_v_ogm_send_softif(bat_priv);
 +      batadv_v_ogm_send_meshif(bat_priv);
        mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
  }
  
@@@ -408,7 -408,7 +408,7 @@@ void batadv_v_ogm_aggr_work(struct work
   */
  int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface);
  
        batadv_v_ogm_start_queue_timer(hard_iface);
        batadv_v_ogm_start_timer(bat_priv);
@@@ -435,7 -435,7 +435,7 @@@ void batadv_v_ogm_iface_disable(struct 
   */
  void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(primary_iface->mesh_iface);
        struct batadv_ogm2_packet *ogm_packet;
  
        mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
@@@ -452,7 -452,7 +452,7 @@@ unlock
  /**
   * batadv_v_forward_penalty() - apply a penalty to the throughput metric
   *  forwarded with B.A.T.M.A.N. V OGMs
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @if_incoming: the interface where the OGM has been received
   * @if_outgoing: the interface where the OGM has to be forwarded to
   * @throughput: the current throughput
@@@ -505,7 -505,7 +505,7 @@@ static u32 batadv_v_forward_penalty(str
  /**
   * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
   *  outgoing interface
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @ogm_received: previously received OGM to be forwarded
   * @orig_node: the originator which has been updated
   * @neigh_node: the neigh_node through with the OGM has been received
@@@ -592,7 -592,7 +592,7 @@@ out
  
  /**
   * batadv_v_ogm_metric_update() - update route metric based on OGM
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @ogm2: OGM2 structure
   * @orig_node: Originator structure for which the OGM has been received
   * @neigh_node: the neigh_node through with the OGM has been received
@@@ -675,7 -675,7 +675,7 @@@ out
  
  /**
   * batadv_v_ogm_route_update() - update routes based on OGM
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @ethhdr: the Ethernet header of the OGM2
   * @ogm2: OGM2 structure
   * @orig_node: Originator structure for which the OGM has been received
@@@ -770,7 -770,7 +770,7 @@@ out
  
  /**
   * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing 
if
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   * @ethhdr: the Ethernet header of the OGM2
   * @ogm2: OGM2 structure
   * @orig_node: Originator structure for which the OGM has been received
@@@ -839,8 -839,7 +839,7 @@@ batadv_v_ogm_aggr_packet(int buff_pos, 
        /* check if there is enough space for the optional TVLV */
        next_buff_pos += ntohs(ogm2_packet->tvlv_len);
  
-       return (next_buff_pos <= packet_len) &&
-              (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+       return next_buff_pos <= packet_len;
  }
  
  /**
@@@ -852,7 -851,7 +851,7 @@@
  static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
                                 struct batadv_hard_iface *if_incoming)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct ethhdr *ethhdr;
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_hardif_neigh_node *hardif_neigh = NULL;
@@@ -926,7 -925,7 +925,7 @@@
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;
  
 -              if (hard_iface->soft_iface != bat_priv->soft_iface)
 +              if (hard_iface->mesh_iface != bat_priv->mesh_iface)
                        continue;
  
                if (!kref_get_unless_zero(&hard_iface->refcount))
@@@ -985,7 -984,7 +984,7 @@@ out
  int batadv_v_ogm_packet_recv(struct sk_buff *skb,
                             struct batadv_hard_iface *if_incoming)
  {
 -      struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +      struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface);
        struct batadv_ogm2_packet *ogm_packet;
        struct ethhdr *ethhdr;
        int ogm_offset;
@@@ -1036,7 -1035,7 +1035,7 @@@ free_skb
  
  /**
   * batadv_v_ogm_init() - initialise the OGM2 engine
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   *
   * Return: 0 on success or a negative error code in case of failure
   */
@@@ -1071,7 -1070,7 +1070,7 @@@ int batadv_v_ogm_init(struct batadv_pri
  
  /**
   * batadv_v_ogm_free() - free OGM private resources
 - * @bat_priv: the bat priv with all the soft interface information
 + * @bat_priv: the bat priv with all the mesh interface information
   */
  void batadv_v_ogm_free(struct batadv_priv *bat_priv)
  {
diff --combined net/bluetooth/6lowpan.c
index 1298c8685bad3,3c29778171c58..73530b8e1eaee
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@@ -13,7 -13,6 +13,7 @@@
  #include <net/ipv6.h>
  #include <net/ip6_route.h>
  #include <net/addrconf.h>
 +#include <net/netdev_lock.h>
  #include <net/pkt_sched.h>
  
  #include <net/bluetooth/bluetooth.h>
@@@ -826,11 -825,16 +826,16 @@@ static struct sk_buff *chan_alloc_skb_c
                                         unsigned long hdr_len,
                                         unsigned long len, int nb)
  {
+       struct sk_buff *skb;
+ 
        /* Note that we must allocate using GFP_ATOMIC here as
         * this function is called originally from netdev hard xmit
         * function in atomic context.
         */
-       return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+       skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+       return skb;
  }
  
  static void chan_suspend_cb(struct l2cap_chan *chan)
diff --combined net/can/af_can.c
index 7b191dbe36930,65230e81fa08c..4c059e41c8316
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@@ -172,8 -172,6 +172,8 @@@ static int can_create(struct net *net, 
                sock_orphan(sk);
                sock_put(sk);
                sock->sk = NULL;
 +      } else {
 +              sock_prot_inuse_add(net, sk->sk_prot, 1);
        }
  
   errout:
@@@ -289,8 -287,8 +289,8 @@@ int can_send(struct sk_buff *skb, int l
                netif_rx(newskb);
  
        /* update statistics */
-       pkg_stats->tx_frames++;
-       pkg_stats->tx_frames_delta++;
+       atomic_long_inc(&pkg_stats->tx_frames);
+       atomic_long_inc(&pkg_stats->tx_frames_delta);
  
        return 0;
  
@@@ -649,8 -647,8 +649,8 @@@ static void can_receive(struct sk_buff 
        int matches;
  
        /* update statistics */
-       pkg_stats->rx_frames++;
-       pkg_stats->rx_frames_delta++;
+       atomic_long_inc(&pkg_stats->rx_frames);
+       atomic_long_inc(&pkg_stats->rx_frames_delta);
  
        /* create non-zero unique skb identifier together with *skb */
        while (!(can_skb_prv(skb)->skbcnt))
@@@ -671,8 -669,8 +671,8 @@@
        consume_skb(skb);
  
        if (matches > 0) {
-               pkg_stats->matches++;
-               pkg_stats->matches_delta++;
+               atomic_long_inc(&pkg_stats->matches);
+               atomic_long_inc(&pkg_stats->matches_delta);
        }
  }
  
diff --combined net/core/lwtunnel.c
index 6d3833269c2b4,4417a18b3e951..e39a459540ec0
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@@ -23,6 -23,8 +23,8 @@@
  #include <net/ip6_fib.h>
  #include <net/rtnh.h>
  
+ #include "dev.h"
+ 
  DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
  EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled);
  
@@@ -147,8 -149,7 +149,8 @@@ int lwtunnel_build_state(struct net *ne
  }
  EXPORT_SYMBOL_GPL(lwtunnel_build_state);
  
 -int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack)
 +int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack,
 +                            bool rtnl_is_held)
  {
        const struct lwtunnel_encap_ops *ops;
        int ret = -EINVAL;
@@@ -159,19 -160,21 +161,19 @@@
                return ret;
        }
  
 -      rcu_read_lock();
 -      ops = rcu_dereference(lwtun_encaps[encap_type]);
 -      rcu_read_unlock();
 +      ops = rcu_access_pointer(lwtun_encaps[encap_type]);
  #ifdef CONFIG_MODULES
        if (!ops) {
                const char *encap_type_str = lwtunnel_encap_str(encap_type);
  
                if (encap_type_str) {
 -                      __rtnl_unlock();
 +                      if (rtnl_is_held)
 +                              __rtnl_unlock();
                        request_module("rtnl-lwt-%s", encap_type_str);
 -                      rtnl_lock();
 +                      if (rtnl_is_held)
 +                              rtnl_lock();
  
 -                      rcu_read_lock();
 -                      ops = rcu_dereference(lwtun_encaps[encap_type]);
 -                      rcu_read_unlock();
 +                      ops = rcu_access_pointer(lwtun_encaps[encap_type]);
                }
        }
  #endif
@@@ -184,8 -187,7 +186,8 @@@
  EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type);
  
  int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
 -                                 struct netlink_ext_ack *extack)
 +                                 struct netlink_ext_ack *extack,
 +                                 bool rtnl_is_held)
  {
        struct rtnexthop *rtnh = (struct rtnexthop *)attr;
        struct nlattr *nla_entype;
@@@ -207,8 -209,7 +209,8 @@@
                                encap_type = nla_get_u16(nla_entype);
  
                                if (lwtunnel_valid_encap_type(encap_type,
 -                                                            extack) != 0)
 +                                                            extack,
 +                                                            rtnl_is_held) != 
0)
                                        return -EOPNOTSUPP;
                        }
                }
@@@ -326,13 -327,23 +328,23 @@@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap)
  
  int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
-       struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
-       int ret = -EINVAL;
+       struct dst_entry *dst;
+       int ret;
+ 
+       if (dev_xmit_recursion()) {
+               net_crit_ratelimited("%s(): recursion limit reached on 
datapath\n",
+                                    __func__);
+               ret = -ENETDOWN;
+               goto drop;
+       }
  
-       if (!dst)
+       dst = skb_dst(skb);
+       if (!dst) {
+               ret = -EINVAL;
                goto drop;
+       }
        lwtstate = dst->lwtstate;
  
        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
@@@ -342,8 -353,11 +354,11 @@@
        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
-       if (likely(ops && ops->output))
+       if (likely(ops && ops->output)) {
+               dev_xmit_recursion_inc();
                ret = ops->output(net, sk, skb);
+               dev_xmit_recursion_dec();
+       }
        rcu_read_unlock();
  
        if (ret == -EOPNOTSUPP)
@@@ -360,13 -374,23 +375,23 @@@ EXPORT_SYMBOL_GPL(lwtunnel_output)
  
  int lwtunnel_xmit(struct sk_buff *skb)
  {
-       struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
-       int ret = -EINVAL;
+       struct dst_entry *dst;
+       int ret;
+ 
+       if (dev_xmit_recursion()) {
+               net_crit_ratelimited("%s(): recursion limit reached on 
datapath\n",
+                                    __func__);
+               ret = -ENETDOWN;
+               goto drop;
+       }
  
-       if (!dst)
+       dst = skb_dst(skb);
+       if (!dst) {
+               ret = -EINVAL;
                goto drop;
+       }
  
        lwtstate = dst->lwtstate;
  
@@@ -377,8 -401,11 +402,11 @@@
        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
-       if (likely(ops && ops->xmit))
+       if (likely(ops && ops->xmit)) {
+               dev_xmit_recursion_inc();
                ret = ops->xmit(skb);
+               dev_xmit_recursion_dec();
+       }
        rcu_read_unlock();
  
        if (ret == -EOPNOTSUPP)
@@@ -395,13 -422,23 +423,23 @@@ EXPORT_SYMBOL_GPL(lwtunnel_xmit)
  
  int lwtunnel_input(struct sk_buff *skb)
  {
-       struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
-       int ret = -EINVAL;
+       struct dst_entry *dst;
+       int ret;
  
-       if (!dst)
+       if (dev_xmit_recursion()) {
+               net_crit_ratelimited("%s(): recursion limit reached on 
datapath\n",
+                                    __func__);
+               ret = -ENETDOWN;
                goto drop;
+       }
+ 
+       dst = skb_dst(skb);
+       if (!dst) {
+               ret = -EINVAL;
+               goto drop;
+       }
        lwtstate = dst->lwtstate;
  
        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
@@@ -411,8 -448,11 +449,11 @@@
        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
-       if (likely(ops && ops->input))
+       if (likely(ops && ops->input)) {
+               dev_xmit_recursion_inc();
                ret = ops->input(skb);
+               dev_xmit_recursion_dec();
+       }
        rcu_read_unlock();
  
        if (ret == -EOPNOTSUPP)
diff --combined net/core/neighbour.c
index 344c9cd168ec1,1a620f903c56e..0738aa6cca25f
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@@ -518,7 -518,7 +518,7 @@@ static struct neigh_hash_table *neigh_h
        if (!ret)
                return NULL;
  
 -      hash_heads = kvzalloc(size, GFP_ATOMIC);
 +      hash_heads = kzalloc(size, GFP_ATOMIC);
        if (!hash_heads) {
                kfree(ret);
                return NULL;
@@@ -536,7 -536,7 +536,7 @@@ static void neigh_hash_free_rcu(struct 
                                                    struct neigh_hash_table,
                                                    rcu);
  
 -      kvfree(nht->hash_heads);
 +      kfree(nht->hash_heads);
        kfree(nht);
  }
  
@@@ -832,10 -832,12 +832,10 @@@ static int pneigh_ifdown_and_unlock(str
        return -ENOENT;
  }
  
 -static void neigh_parms_destroy(struct neigh_parms *parms);
 -
  static inline void neigh_parms_put(struct neigh_parms *parms)
  {
        if (refcount_dec_and_test(&parms->refcnt))
 -              neigh_parms_destroy(parms);
 +              kfree(parms);
  }
  
  /*
@@@ -1711,6 -1713,11 +1711,6 @@@ void neigh_parms_release(struct neigh_t
  }
  EXPORT_SYMBOL(neigh_parms_release);
  
 -static void neigh_parms_destroy(struct neigh_parms *parms)
 -{
 -      kfree(parms);
 -}
 -
  static struct lock_class_key neigh_table_proxy_queue_class;
  
  static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
@@@ -2243,6 -2250,7 +2243,7 @@@ static const struct nla_policy nl_neigh
  static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
        [NDTPA_IFINDEX]                 = { .type = NLA_U32 },
        [NDTPA_QUEUE_LEN]               = { .type = NLA_U32 },
+       [NDTPA_QUEUE_LENBYTES]          = { .type = NLA_U32 },
        [NDTPA_PROXY_QLEN]              = { .type = NLA_U32 },
        [NDTPA_APP_PROBES]              = { .type = NLA_U32 },
        [NDTPA_UCAST_PROBES]            = { .type = NLA_U32 },
diff --combined net/ipv6/route.c
index fb2e99a565291,15ce21afc8c62..c3406a0d45bd9
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -3644,7 -3644,8 +3644,8 @@@ out
                in6_dev_put(idev);
  
        if (err) {
-               lwtstate_put(fib6_nh->fib_nh_lws);
+               fib_nh_common_release(&fib6_nh->nh_common);
+               fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
                fib6_nh->fib_nh_lws = NULL;
                netdev_put(dev, dev_tracker);
        }
@@@ -3802,10 -3803,12 +3803,12 @@@ static struct fib6_info *ip6_route_info
        if (nh) {
                if (rt->fib6_src.plen) {
                        NL_SET_ERR_MSG(extack, "Nexthops can not be used with 
source routing");
+                       err = -EINVAL;
                        goto out_free;
                }
                if (!nexthop_get(nh)) {
                        NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+                       err = -ENOENT;
                        goto out_free;
                }
                rt->nh = nh;
@@@ -5128,8 -5131,7 +5131,8 @@@ static int rtm_to_fib6_config(struct sk
                cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
  
                err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
 -                                                   cfg->fc_mp_len, extack);
 +                                                   cfg->fc_mp_len,
 +                                                   extack, true);
                if (err < 0)
                        goto errout;
        }
@@@ -5148,8 -5150,7 +5151,8 @@@
        if (tb[RTA_ENCAP_TYPE]) {
                cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
  
 -              err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
 +              err = lwtunnel_valid_encap_type(cfg->fc_encap_type,
 +                                              extack, true);
                if (err < 0)
                        goto errout;
        }
diff --combined net/ipv6/tcpv6_offload.c
index 91b88daa5b555,ae2da28f9dfb1..d9b11fe41bf0c
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@@ -35,7 -35,7 +35,7 @@@ static void tcp6_check_fraglist_gro(str
  
        inet6_get_iif_sdif(skb, &iif, &sdif);
        hdr = skb_gro_network_header(skb);
 -      net = dev_net(skb->dev);
 +      net = dev_net_rcu(skb->dev);
        sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
                                        &hdr->saddr, th->source,
                                        &hdr->daddr, ntohs(th->dest),
@@@ -94,14 -94,23 +94,23 @@@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_co
  }
  
  static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+                                    struct in6_addr *oldip,
+                                    const struct in6_addr *newip,
                                     __be16 *oldport, __be16 newport)
  {
-       struct tcphdr *th;
+       struct tcphdr *th = tcp_hdr(seg);
+ 
+       if (!ipv6_addr_equal(oldip, newip)) {
+               inet_proto_csum_replace16(&th->check, seg,
+                                         oldip->s6_addr32,
+                                         newip->s6_addr32,
+                                         true);
+               *oldip = *newip;
+       }
  
        if (*oldport == newport)
                return;
  
-       th = tcp_hdr(seg);
        inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
        *oldport = newport;
  }
@@@ -129,10 -138,10 +138,10 @@@ static struct sk_buff *__tcpv6_gso_segm
                th2 = tcp_hdr(seg);
                iph2 = ipv6_hdr(seg);
  
-               iph2->saddr = iph->saddr;
-               iph2->daddr = iph->daddr;
-               __tcpv6_gso_segment_csum(seg, &th2->source, th->source);
-               __tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
+               __tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+                                        &th2->source, th->source);
+               __tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+                                        &th2->dest, th->dest);
        }
  
        return segs;
diff --combined net/xdp/xsk_buff_pool.c
index 14716ad3d7bc4,d158cb6dd3919..25a76c5ce0f12
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@@ -1,7 -1,5 +1,7 @@@
  // SPDX-License-Identifier: GPL-2.0
  
 +#include <linux/netdevice.h>
 +#include <net/netdev_lock.h>
  #include <net/xsk_buff_pool.h>
  #include <net/xdp_sock.h>
  #include <net/xdp_sock_drv.h>
@@@ -107,7 -105,7 +107,7 @@@ struct xsk_buff_pool *xp_create_and_ass
                if (pool->unaligned)
                        pool->free_heads[i] = xskb;
                else
-                       xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
+                       xp_init_xskb_addr(xskb, pool, (u64)i * 
pool->chunk_size);
        }
  
        return pool;
@@@ -221,7 -219,6 +221,7 @@@ int xp_assign_dev(struct xsk_buff_pool 
        bpf.xsk.pool = pool;
        bpf.xsk.queue_id = queue_id;
  
 +      netdev_ops_assert_locked(netdev);
        err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
        if (err)
                goto err_unreg_pool;
@@@ -702,56 -699,18 +702,56 @@@ void xp_free(struct xdp_buff_xsk *xskb
  }
  EXPORT_SYMBOL(xp_free);
  
 -void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 +static u64 __xp_raw_get_addr(const struct xsk_buff_pool *pool, u64 addr)
 +{
 +      return pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
 +}
 +
 +static void *__xp_raw_get_data(const struct xsk_buff_pool *pool, u64 addr)
  {
 -      addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return pool->addrs + addr;
  }
 +
 +void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
 +{
 +      return __xp_raw_get_data(pool, __xp_raw_get_addr(pool, addr));
 +}
  EXPORT_SYMBOL(xp_raw_get_data);
  
 -dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
 +static dma_addr_t __xp_raw_get_dma(const struct xsk_buff_pool *pool, u64 addr)
  {
 -      addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return (pool->dma_pages[addr >> PAGE_SHIFT] &
                ~XSK_NEXT_PG_CONTIG_MASK) +
                (addr & ~PAGE_MASK);
  }
 +
 +dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
 +{
 +      return __xp_raw_get_dma(pool, __xp_raw_get_addr(pool, addr));
 +}
  EXPORT_SYMBOL(xp_raw_get_dma);
 +
 +/**
 + * xp_raw_get_ctx - get &xdp_desc context
 + * @pool: XSk buff pool desc address belongs to
 + * @addr: desc address (from userspace)
 + *
 + * Helper for getting desc's DMA address and metadata pointer, if present.
 + * Saves one call on hotpath, double calculation of the actual address,
 + * and inline checks for metadata presence and sanity.
 + *
 + * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
 + * pointer, if it is present and valid (initialized to %NULL otherwise).
 + */
 +struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
 +{
 +      struct xdp_desc_ctx ret;
 +
 +      addr = __xp_raw_get_addr(pool, addr);
 +
 +      ret.dma = __xp_raw_get_dma(pool, addr);
 +      ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr));
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL(xp_raw_get_ctx);
diff --combined tools/testing/selftests/drivers/net/ping.py
index 79f07e0510ecc,fc69bfcc37c46..93120e86e1024
--- a/tools/testing/selftests/drivers/net/ping.py
+++ b/tools/testing/selftests/drivers/net/ping.py
@@@ -7,26 -7,26 +7,26 @@@ from lib.py import ksft_run, ksft_exi
  from lib.py import ksft_eq, KsftSkipEx, KsftFailEx
  from lib.py import EthtoolFamily, NetDrvEpEnv
  from lib.py import bkg, cmd, wait_port_listen, rand_port
- from lib.py import ethtool, ip
+ from lib.py import defer, ethtool, ip
  
  remote_ifname=""
  no_sleep=False
  
  def _test_v4(cfg) -> None:
 -    cfg.require_v4()
 +    cfg.require_ipver("4")
  
 -    cmd(f"ping -c 1 -W0.5 {cfg.remote_v4}")
 -    cmd(f"ping -c 1 -W0.5 {cfg.v4}", host=cfg.remote)
 -    cmd(f"ping -s 65000 -c 1 -W0.5 {cfg.remote_v4}")
 -    cmd(f"ping -s 65000 -c 1 -W0.5 {cfg.v4}", host=cfg.remote)
 +    cmd("ping -c 1 -W0.5 " + cfg.remote_addr_v["4"])
 +    cmd("ping -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote)
 +    cmd("ping -s 65000 -c 1 -W0.5 " + cfg.remote_addr_v["4"])
 +    cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote)
  
  def _test_v6(cfg) -> None:
 -    cfg.require_v6()
 +    cfg.require_ipver("6")
  
 -    cmd(f"ping -c 1 -W5 {cfg.remote_v6}")
 -    cmd(f"ping -c 1 -W5 {cfg.v6}", host=cfg.remote)
 -    cmd(f"ping -s 65000 -c 1 -W0.5 {cfg.remote_v6}")
 -    cmd(f"ping -s 65000 -c 1 -W0.5 {cfg.v6}", host=cfg.remote)
 +    cmd("ping -c 1 -W5 " + cfg.remote_addr_v["6"])
 +    cmd("ping -c 1 -W5 " + cfg.addr_v["6"], host=cfg.remote)
 +    cmd("ping -s 65000 -c 1 -W0.5 " + cfg.remote_addr_v["6"])
 +    cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["6"], host=cfg.remote)
  
  def _test_tcp(cfg) -> None:
      cfg.require_cmd("socat", remote=True)
@@@ -60,6 -60,7 +60,7 @@@ def _set_xdp_generic_sb_on(cfg) -> None
      prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
      cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, 
host=cfg.remote)
      cmd(f"ip link set dev {cfg.ifname} mtu 1500 xdpgeneric obj {prog} sec 
xdp", shell=True)
+     defer(cmd, f"ip link set dev {cfg.ifname} xdpgeneric off")
  
      if no_sleep != True:
          time.sleep(10)
@@@ -68,7 -69,9 +69,9 @@@ def _set_xdp_generic_mb_on(cfg) -> None
      test_dir = os.path.dirname(os.path.realpath(__file__))
      prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
      cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, 
host=cfg.remote)
+     defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
      ip("link set dev %s mtu 9000 xdpgeneric obj %s sec xdp.frags" % 
(cfg.ifname, prog))
+     defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdpgeneric off")
  
      if no_sleep != True:
          time.sleep(10)
@@@ -78,6 -81,7 +81,7 @@@ def _set_xdp_native_sb_on(cfg) -> None
      prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
      cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, 
host=cfg.remote)
      cmd(f"ip -j link set dev {cfg.ifname} mtu 1500 xdp obj {prog} sec xdp", 
shell=True)
+     defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
      xdp_info = ip("-d link show %s" % (cfg.ifname), json=True)[0]
      if xdp_info['xdp']['mode'] != 1:
          """
@@@ -94,10 -98,11 +98,11 @@@ def _set_xdp_native_mb_on(cfg) -> None
      test_dir = os.path.dirname(os.path.realpath(__file__))
      prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
      cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, 
host=cfg.remote)
+     defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
      try:
          cmd(f"ip link set dev {cfg.ifname} mtu 9000 xdp obj {prog} sec 
xdp.frags", shell=True)
+         defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
      except Exception as e:
-         cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, 
host=cfg.remote)
          raise KsftSkipEx('device does not support native-multi-buffer XDP')
  
      if no_sleep != True:
@@@ -111,6 -116,7 +116,7 @@@ def _set_xdp_offload_on(cfg) -> None
          cmd(f"ip link set dev {cfg.ifname} xdpoffload obj {prog} sec xdp", 
shell=True)
      except Exception as e:
          raise KsftSkipEx('device does not support offloaded XDP')
+     defer(ip, f"link set dev {cfg.ifname} xdpoffload off")
      cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, 
host=cfg.remote)
  
      if no_sleep != True:
@@@ -120,7 -126,7 +126,7 @@@ def get_interface_info(cfg) -> None
      global remote_ifname
      global no_sleep
  
 -    remote_info = cmd(f"ip -4 -o addr show to {cfg.remote_v4} | awk '{{print 
$2}}'", shell=True, host=cfg.remote).stdout
 +    remote_info = cmd(f"ip -4 -o addr show to {cfg.remote_addr_v['4']} | awk 
'{{print $2}}'", shell=True, host=cfg.remote).stdout
      remote_ifname = remote_info.rstrip('\n')
      if remote_ifname == "":
          raise KsftFailEx('Can not get remote interface')
@@@ -157,7 -163,6 +163,6 @@@ def test_xdp_generic_sb(cfg, netnl) -> 
      _test_v4(cfg)
      _test_v6(cfg)
      _test_tcp(cfg)
-     ip("link set dev %s xdpgeneric off" % cfg.ifname)
  
  def test_xdp_generic_mb(cfg, netnl) -> None:
      _set_xdp_generic_mb_on(cfg)
@@@ -169,7 -174,6 +174,6 @@@
      _test_v4(cfg)
      _test_v6(cfg)
      _test_tcp(cfg)
-     ip("link set dev %s xdpgeneric off" % cfg.ifname)
  
  def test_xdp_native_sb(cfg, netnl) -> None:
      _set_xdp_native_sb_on(cfg)
@@@ -181,7 -185,6 +185,6 @@@
      _test_v4(cfg)
      _test_v6(cfg)
      _test_tcp(cfg)
-     ip("link set dev %s xdp off" % cfg.ifname)
  
  def test_xdp_native_mb(cfg, netnl) -> None:
      _set_xdp_native_mb_on(cfg)
@@@ -193,14 -196,12 +196,12 @@@
      _test_v4(cfg)
      _test_v6(cfg)
      _test_tcp(cfg)
-     ip("link set dev %s xdp off" % cfg.ifname)
  
  def test_xdp_offload(cfg, netnl) -> None:
      _set_xdp_offload_on(cfg)
      _test_v4(cfg)
      _test_v6(cfg)
      _test_tcp(cfg)
-     ip("link set dev %s xdpoffload off" % cfg.ifname)
  
  def main() -> None:
      with NetDrvEpEnv(__file__) as cfg:
@@@ -213,7 -214,6 +214,6 @@@
                    test_xdp_native_mb,
                    test_xdp_offload],
                   args=(cfg, EthtoolFamily()))
-         set_interface_init(cfg)
      ksft_exit()
  
  
diff --combined tools/testing/selftests/net/Makefile
index f03a0399e7a3a,8f32b4f01aee1..6d718b478ed83
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@@ -7,7 -7,7 +7,7 @@@ CFLAGS += -I../../../../usr/include/ $(
  CFLAGS += -I../
  
  TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
 -            rtnetlink.sh xfrm_policy.sh test_blackhole_dev.sh
 +            rtnetlink.sh xfrm_policy.sh
  TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
  TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
  TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh 
reuseport_addr_any.sh
@@@ -31,15 -31,11 +31,14 @@@ TEST_PROGS += veth.s
  TEST_PROGS += ioam6.sh
  TEST_PROGS += gro.sh
  TEST_PROGS += gre_gso.sh
- TEST_PROGS += gre_ipv6_lladdr.sh
  TEST_PROGS += cmsg_so_mark.sh
  TEST_PROGS += cmsg_so_priority.sh
 -TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
 +TEST_PROGS += test_so_rcv.sh
 +TEST_PROGS += cmsg_time.sh cmsg_ip.sh
  TEST_PROGS += netns-name.sh
 +TEST_PROGS += link_netns.py
  TEST_PROGS += nl_netdev.py
 +TEST_PROGS += rtnetlink.py
  TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
  TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
  TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
@@@ -79,7 -75,6 +78,7 @@@ TEST_GEN_PROGS += reuseport_dualstack r
  TEST_GEN_FILES += toeplitz
  TEST_GEN_FILES += cmsg_sender
  TEST_GEN_FILES += stress_reuseport_listen
 +TEST_GEN_FILES += so_rcv_listener
  TEST_PROGS += test_vxlan_vnifiltering.sh
  TEST_GEN_FILES += io_uring_zerocopy_tx
  TEST_PROGS += io_uring_zerocopy_tx.sh
@@@ -105,7 -100,7 +104,8 @@@ TEST_PROGS += vlan_bridge_binding.s
  TEST_PROGS += bpf_offload.py
  TEST_PROGS += ipv6_route_update_soft_lockup.sh
  TEST_PROGS += busy_poll_test.sh
 +TEST_GEN_PROGS += proc_net_pktgen
+ TEST_PROGS += lwt_dst_cache_ref_loop.sh
  
  # YNL files, must be before "include ..lib.mk"
  YNL_GEN_FILES := busy_poller netlink-dumps
diff --combined tools/testing/selftests/net/config
index b0d0eda829d08,61e5116987f3e..130d532b7e67a
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@@ -18,8 -18,6 +18,8 @@@ CONFIG_DUMMY=
  CONFIG_BRIDGE_VLAN_FILTERING=y
  CONFIG_BRIDGE=y
  CONFIG_CRYPTO_CHACHA20POLY1305=m
 +CONFIG_DEBUG_INFO_BTF=y
 +CONFIG_DEBUG_INFO_BTF_MODULES=n
  CONFIG_VLAN_8021Q=y
  CONFIG_GENEVE=m
  CONFIG_IFB=y
@@@ -109,9 -107,5 +109,11 @@@ CONFIG_XFRM_INTERFACE=
  CONFIG_XFRM_USER=m
  CONFIG_IP_NF_MATCH_RPFILTER=m
  CONFIG_IP6_NF_MATCH_RPFILTER=m
 +CONFIG_IPVLAN=m
 +CONFIG_CAN=m
 +CONFIG_CAN_DEV=m
 +CONFIG_CAN_VXCAN=m
 +CONFIG_NETKIT=y
 +CONFIG_NET_PKTGEN=m
+ CONFIG_IPV6_ILA=m
+ CONFIG_IPV6_RPL_LWTUNNEL=y

-- 
LinuxNextTracking

Reply via email to