On Wed, 2007-10-03 at 09:18 -0400, jamal wrote:
> On Tue, 2007-10-02 at 16:33 -0700, Michael Chan wrote:
> 
> > Seems ok to me.  I think we should make it more clear that we're
> > skipping over the VLAN tag:
> > 
> > (struct tg3_tx_cbdata *)&((__skb)->cb[sizeof(struct vlan_skb_tx_cookie)])
> > 
> 
> Will do - thanks Michael.

Ok, attached is a patch against net-2.6.24 from this morning. I am
setting up some equipment for testing as I type this, so I will test
for any regressions. If you don't hear from me on the subject, then
all went ok.

cheers,
jamal

[TG3] Some cleanups
Make the xmit path code better organized functionally and more readable.
Matt Carlson contributed moving the VLAN formatting into the
XXXX_prep_frame() functions, and Michael Chan eyeballed the patch and
made it better.
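
The heart of the cleanup is carving the per-packet TX metadata out of
skb->cb[] just past the VLAN cookie the stack may already have placed
there; the cb[8] offset in TG3_SKB_CB() matches
sizeof(struct vlan_skb_tx_cookie), per Michael's suggestion above. A
minimal userspace sketch of that layout trick, with stand-in struct
names and sizes rather than the kernel's:

#include <assert.h>
#include <stdio.h>

struct fake_vlan_cookie {       /* stand-in for struct vlan_skb_tx_cookie */
        unsigned int magic;
        unsigned int vlan_tag;
};

struct fake_skb {               /* stand-in for struct sk_buff */
        char cb[48];            /* per-layer scratch area */
};

struct tx_cbdata {              /* mirrors struct tg3_tx_cbdata */
        unsigned int base_flags;
        unsigned int mss;
};

/* Place driver metadata right after the VLAN cookie, as TG3_SKB_CB() does. */
#define SKB_TX_CB(skb) \
        ((struct tx_cbdata *)&((skb)->cb[sizeof(struct fake_vlan_cookie)]))

int main(void)
{
        struct fake_skb skb = { .cb = {0} };
        struct tx_cbdata *cb = SKB_TX_CB(&skb);

        /* Both regions must fit side by side in cb[]. */
        assert(sizeof(struct fake_vlan_cookie) + sizeof(struct tx_cbdata)
               <= sizeof(skb.cb));

        cb->base_flags = 0x1;
        cb->mss = 1460;
        printf("base_flags=%#x mss=%u\n", cb->base_flags, cb->mss);
        return 0;
}

With the metadata stashed, the xmit path splits into three stages:
XXXX_prep_frame() fills the cb area (and may drop the skb),
tg3_enqueue()/tg3_enqueue_buggy() maps and queues the descriptors, and
tg3_complete_xmit() bumps the producer index and handles queue flow
control.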

Signed-off-by: Jamal Hadi Salim <[EMAIL PROTECTED]>

---
commit 190674ff1fe0b7bddf038c2bfddf45b9c6418e2a
tree d6f03d10d94a39ad22fd590b30c6d50c81f3eb5c
parent 0a14acec89454d350c1bd141b3309df421433f3c
author Jamal Hadi Salim <[EMAIL PROTECTED]> Sun, 07 Oct 2007 08:10:32 -0400
committer Jamal Hadi Salim <[EMAIL PROTECTED]> Sun, 07 Oct 2007 08:10:32 -0400

 drivers/net/tg3.c |  281 ++++++++++++++++++++++++++++++++---------------------
 1 files changed, 172 insertions(+), 109 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index d4ac6e9..ce6f02f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3910,47 +3910,72 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
        txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
 }
 
-/* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLG2_HW_TSO_2 only.
- */
-static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+struct tg3_tx_cbdata {
+       u32 base_flags;
+       unsigned int mss;
+};
+/* Using cb[8] skips over the VLAN tag:
+ * (struct tg3_tx_cbdata *)&((__skb)->cb[sizeof(struct vlan_skb_tx_cookie)])
+ */
+#define TG3_SKB_CB(__skb)       ((struct tg3_tx_cbdata *)&((__skb)->cb[8]))
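+/* Driver-internal status: the skb has already been freed, so callers
+ * must translate this into NETDEV_TX_OK before returning to the stack.
+ */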
+#define NETDEV_TX_DROPPED       -5
+
+static int tg3_prep_bug_frame(struct sk_buff *skb, struct net_device *dev)
 {
+       struct tg3_tx_cbdata *cb = TG3_SKB_CB(skb);
+#if TG3_VLAN_TAG_USED
        struct tg3 *tp = netdev_priv(dev);
-       dma_addr_t mapping;
-       u32 len, entry, base_flags, mss;
+       u32 vlantag = 0;
 
-       len = skb_headlen(skb);
+       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+               vlantag = (TXD_FLAG_VLAN | (vlan_tx_tag_get(skb) << 16));
 
-       /* We are running in BH disabled context with netif_tx_lock
-        * and TX reclaim runs via tp->napi.poll inside of a software
-        * interrupt.  Furthermore, IRQ processing runs lockless so we have
-        * no IRQ context deadlocks to worry about either.  Rejoice!
-        */
-       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
+       cb->base_flags = vlantag;
+#endif
 
-                       /* This is a hard error, log it. */
-                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
-                              "queue awake!\n", dev->name);
+       cb->mss = skb_shinfo(skb)->gso_size;
+       if (cb->mss != 0) {
+               if (skb_header_cloned(skb) &&
+                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+                       dev_kfree_skb(skb);
+                       return NETDEV_TX_DROPPED;
                }
-               return NETDEV_TX_BUSY;
+
+               cb->base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+                              TXD_FLAG_CPU_POST_DMA);
        }
 
-       entry = tp->tx_prod;
-       base_flags = 0;
-       mss = 0;
-       if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               cb->base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+       return NETDEV_TX_OK;
+}
+
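+/* Fill the skb's cb[] area with the VLAN, TSO and checksum flags the
+ * descriptors will need.  May free the skb and return NETDEV_TX_DROPPED.
+ */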
+static int tg3_prep_frame(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tg3_tx_cbdata *cb = TG3_SKB_CB(skb);
+#if TG3_VLAN_TAG_USED
+       struct tg3 *tp = netdev_priv(dev);
+       u32 vlantag = 0;
+
+       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+               vlantag = (TXD_FLAG_VLAN | (vlan_tx_tag_get(skb) << 16));
+
+       cb->base_flags = vlantag;
+#endif
+
+       cb->mss = skb_shinfo(skb)->gso_size;
+       if (cb->mss != 0) {
                int tcp_opt_len, ip_tcp_len;
 
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
-                       goto out_unlock;
+                       return NETDEV_TX_DROPPED;
                }
 
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-                       mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+                       cb->mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);
 
@@ -3958,32 +3983,58 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 
                        iph->check = 0;
-                       iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-                       mss |= (ip_tcp_len + tcp_opt_len) << 9;
+                       iph->tot_len = htons(cb->mss + ip_tcp_len
+                                            + tcp_opt_len);
+                       cb->mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }
 
-               base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+               cb->base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
                tcp_hdr(skb)->check = 0;
 
        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#if TG3_VLAN_TAG_USED
-       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
-               base_flags |= (TXD_FLAG_VLAN |
-                              (vlan_tx_tag_get(skb) << 16));
-#endif
+               cb->base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+       return NETDEV_TX_OK;
+}
+
+void tg3_complete_xmit(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       u32 entry = tp->tx_prod;
+
+       /* Packets are ready, update Tx producer idx local and on card. */
+       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+
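+       /* Stop the queue once the ring cannot hold another maximally
+        * fragmented skb; the immediate re-check guards against a race
+        * with TX reclaim.
+        */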
+       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+               netif_stop_queue(dev);
+               if (tg3_tx_avail(tp) >= TG3_TX_WAKEUP_THRESH(tp))
+                       netif_wake_queue(dev);
+       }
+
+       mmiowb();
+       dev->trans_start = jiffies;
+}
 
+static int tg3_enqueue(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       dma_addr_t mapping;
+       u32 len, entry;
+       struct tg3_tx_cbdata *cb = TG3_SKB_CB(skb);
+
+       entry = tp->tx_prod;
+       len = skb_headlen(skb);
        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-       tg3_set_txd(tp, entry, mapping, len, base_flags,
-                   (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
+       tg3_set_txd(tp, entry, mapping, len, cb->base_flags,
+                   (skb_shinfo(skb)->nr_frags == 0) | (cb->mss << 1));
 
        entry = NEXT_TX(entry);
 
@@ -4005,28 +4056,51 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
                        tg3_set_txd(tp, entry, mapping, len,
-                                   base_flags, (i == last) | (mss << 1));
+                                   cb->base_flags,
+                                   (i == last) | (cb->mss << 1));
 
                        entry = NEXT_TX(entry);
                }
        }
 
-       /* Packets are ready, update Tx producer idx local and on card. */
-       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
-
        tp->tx_prod = entry;
-       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
-               netif_stop_queue(dev);
-               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
-                       netif_wake_queue(tp->dev);
-       }
+       return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that don't have any bugs and
+ * support TG3_FLG2_HW_TSO_2 only.
+ */
+static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int ret = tg3_prep_frame(skb, dev);
+       /* XXX: the original code did mmiowb() on failure;
+        * I don't think that's necessary.
+        */
+       if (unlikely(ret != NETDEV_TX_OK))
+               return NETDEV_TX_OK;
 
-out_unlock:
-       mmiowb();
+       /* We are running in BH disabled context with netif_tx_lock
+        * and TX reclaim runs via tp->poll inside of a software
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
+        */
+       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
 
-       dev->trans_start = jiffies;
+                       /* This is a hard error, log it. */
+                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+                              "queue awake!\n", dev->name);
+               }
+               return NETDEV_TX_BUSY;
+       }
 
-       return NETDEV_TX_OK;
+       ret = tg3_enqueue(skb, dev);
+       if (ret == NETDEV_TX_OK)
+               tg3_complete_xmit(dev);
+
+       return ret;
 }
 
 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
@@ -4067,46 +4141,19 @@ tg3_tso_bug_end:
 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
  */
-static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
+static int tg3_enqueue_buggy(struct sk_buff *skb, struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
-       u32 len, entry, base_flags, mss;
+       u32 len, entry;
        int would_hit_hwbug;
+       struct tg3_tx_cbdata *cb = TG3_SKB_CB(skb);
 
-       len = skb_headlen(skb);
 
-       /* We are running in BH disabled context with netif_tx_lock
-        * and TX reclaim runs via tp->napi.poll inside of a software
-        * interrupt.  Furthermore, IRQ processing runs lockless so we have
-        * no IRQ context deadlocks to worry about either.  Rejoice!
-        */
-       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
-
-                       /* This is a hard error, log it. */
-                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
-                              "queue awake!\n", dev->name);
-               }
-               return NETDEV_TX_BUSY;
-       }
-
-       entry = tp->tx_prod;
-       base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
-       mss = 0;
-       if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+       if (cb->mss != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;
 
-               if (skb_header_cloned(skb) &&
-                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-                       dev_kfree_skb(skb);
-                       goto out_unlock;
-               }
-
                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 
@@ -4115,15 +4162,13 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));
 
-               base_flags |= (TXD_FLAG_CPU_PRE_DMA |
-                              TXD_FLAG_CPU_POST_DMA);
 
                iph = ip_hdr(skb);
                iph->check = 0;
-               iph->tot_len = htons(mss + hdr_len);
+               iph->tot_len = htons(cb->mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        tcp_hdr(skb)->check = 0;
-                       base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+                       cb->base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
@@ -4136,22 +4181,19 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                                int tsflags;
 
                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
-                               mss |= (tsflags << 11);
+                               cb->mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
-                               base_flags |= tsflags << 12;
+                               cb->base_flags |= tsflags << 12;
                        }
                }
        }
-#if TG3_VLAN_TAG_USED
-       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
-               base_flags |= (TXD_FLAG_VLAN |
-                              (vlan_tx_tag_get(skb) << 16));
-#endif
+       len = skb_headlen(skb);
+       entry = tp->tx_prod;
 
        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -4164,8 +4206,8 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;
 
-       tg3_set_txd(tp, entry, mapping, len, base_flags,
-                   (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
+       tg3_set_txd(tp, entry, mapping, len, cb->base_flags,
+                   (skb_shinfo(skb)->nr_frags == 0) | (cb->mss << 1));
 
        entry = NEXT_TX(entry);
 
@@ -4194,10 +4236,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
-                                           base_flags, (i == last)|(mss << 1));
+                                           cb->base_flags,
+                                           (i == last)|(cb->mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
-                                           base_flags, (i == last));
+                                           cb->base_flags, (i == last));
 
                        entry = NEXT_TX(entry);
                }
@@ -4214,28 +4257,48 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
-                                               &start, base_flags, mss))
-                       goto out_unlock;
+                                               &start, cb->base_flags,
+                                               cb->mss)) {
+                       mmiowb();
+                       return NETDEV_TX_OK;
+               }
 
                entry = start;
        }
 
-       /* Packets are ready, update Tx producer idx local and on card. */
-       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
-
        tp->tx_prod = entry;
-       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
-               netif_stop_queue(dev);
-               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
-                       netif_wake_queue(tp->dev);
-       }
+       return NETDEV_TX_OK;
+}
+
+static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int ret = tg3_prep_bug_frame(skb, dev);
 
-out_unlock:
-       mmiowb();
+       if (unlikely(ret != NETDEV_TX_OK))
+               return NETDEV_TX_OK;
 
-       dev->trans_start = jiffies;
+       /* We are running in BH disabled context with netif_tx_lock
+        * and TX reclaim runs via tp->poll inside of a software
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
+        */
+       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
 
-       return NETDEV_TX_OK;
+                       /* This is a hard error, log it. */
+                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+                              "queue awake!\n", dev->name);
+               }
+               return NETDEV_TX_BUSY;
+       }
+
+       ret = tg3_enqueue_buggy(skb, dev);
+       if (ret == NETDEV_TX_OK)
+               tg3_complete_xmit(dev);
+
+       return ret;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
