In coming commits we will need a more generic tunnel packet preparation
function. As a preparation for this, we refactor tipc_link_tnl_prepare()
in a separate commit, moving the copying of the send queues and the
fragmentation of oversized packets into two new helpers in msg.c,
tipc_skb_queue_copy() and tipc_skb_queue_fragment().
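
In outline, the resulting flow in tipc_link_tnl_prepare() becomes
(sketch only; see the diff below for the exact code):

  tipc_skb_queue_copy(&l->transmq, &tnlq);
  tipc_skb_queue_copy(&l->backlogq, &tnlq);
  rc = tipc_skb_queue_fragment(&tnlq, tnl->mtu, &pktcnt, enhanced, mtyp);
  /* on success: prepend a TUNNEL_PROTOCOL header to each packet */
  tipc_link_xmit(tnl, &tnlq, xmitq);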

Signed-off-by: Jon Maloy <jon.ma...@ericsson.com>
---
 net/tipc/link.c | 148 +++++++++++++++++++-------------------------------------
 net/tipc/msg.c  |  50 +++++++++++++++++++
 net/tipc/msg.h  |   4 +-
 3 files changed, 104 insertions(+), 98 deletions(-)

diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2531f94..f16219c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1801,132 +1801,86 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
 {
+       bool enhanced = tnl->peer_caps & TIPC_TUNNEL_ENHANCED;
        struct sk_buff_head *fdefq = &tnl->failover_deferdq;
-       struct sk_buff *skb, *tnlskb;
-       struct tipc_msg *hdr, tnlhdr;
-       struct sk_buff_head *queue = &l->transmq;
-       struct sk_buff_head tmpxq, tnlq, frags;
-       u16 pktlen, pktcnt, seqno = l->snd_nxt;
-       bool pktcnt_need_update = false;
-       u16 syncpt;
-       int rc;
+       u32 self = tipc_own_addr(l->net);
+       struct sk_buff_head tnlq, tmpxq;
+       struct tipc_msg hdr, *_hdr;
+       u16 inner_seqno, syncpt;
+       struct sk_buff *skb;
+       int pktcnt, inner_len, rc;
 
        if (!tnl)
                return;
+       __skb_queue_head_init(&tnlq);
+       __skb_queue_head_init(&tmpxq);
 
-       __skb_queue_head_init(&tnlq);
-       /* Link Synching:
-        * From now on, send only one single ("dummy") SYNCH message
-        * to peer. The SYNCH message does not contain any data, just
-        * a header conveying the synch point to the peer.
+       /* When TUNNEL_ENHANCED is supported, it is sufficient to send a
+        * single, empty SYNCH message to the peer, conveying the synch point
+        */
-       if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
-               tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
-                                        INT_H_SIZE, 0, l->addr,
-                                        tipc_own_addr(l->net),
-                                        0, 0, 0);
-               if (!tnlskb) {
-                       pr_warn("%sunable to create dummy SYNCH_MSG\n",
-                               link_co_err);
+       if (mtyp == SYNCH_MSG && enhanced) {
+               skb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG, INT_H_SIZE, 0,
+                                     l->addr, self, 0, 0, 0);
+               if (!skb) {
+                       pr_warn("%sfailed to create SYNCH_MSG\n", link_co_err);
                        return;
                }
-
-               hdr = buf_msg(tnlskb);
+               _hdr = buf_msg(skb);
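+               /* Synch point is the seqno of the last sent or queued packet */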
                syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
-               msg_set_syncpt(hdr, syncpt);
-               msg_set_bearer_id(hdr, l->peer_bearer_id);
-               __skb_queue_tail(&tnlq, tnlskb);
+               msg_set_syncpt(_hdr, syncpt);
+               msg_set_bearer_id(_hdr, l->peer_bearer_id);
+               __skb_queue_tail(&tnlq, skb);
                tipc_link_xmit(tnl, &tnlq, xmitq);
                return;
        }
-
-       __skb_queue_head_init(&tmpxq);
-       __skb_queue_head_init(&frags);
-       /* At least one packet required for safe algorithm => add dummy */
-       skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
-                             BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
-                             0, 0, TIPC_ERR_NO_PORT);
+       /* At least one packet is required for a safe algorithm => add dummy */
+       skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_CONN_MSG, BASIC_H_SIZE,
+                             0, l->addr, self, 0, 0, TIPC_ERR_NO_PORT);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }
-       __skb_queue_tail(&tnlq, skb);
+       __skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, &tmpxq);
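+       /* The dummy is now on the link's own queues; drop the wire copy */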
        __skb_queue_purge(&tmpxq);
 
-       /* Initialize reusable tunnel packet header */
-       tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
-                     mtyp, INT_H_SIZE, l->addr);
+       /* The number of packets to announce depends on the message type */
        if (mtyp == SYNCH_MSG)
                pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
        else
                pktcnt = skb_queue_len(&l->transmq);
        pktcnt += skb_queue_len(&l->backlogq);
-       msg_set_msgcnt(&tnlhdr, pktcnt);
-       msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
-tnl:
-       /* Wrap each packet into a tunnel packet */
-       skb_queue_walk(queue, skb) {
-               hdr = buf_msg(skb);
-               if (queue == &l->backlogq)
-                       msg_set_seqno(hdr, seqno++);
-               pktlen = msg_size(hdr);
-
-               /* Tunnel link MTU is not large enough? This could be
-                * due to:
-                * 1) Link MTU has just changed or set differently;
-                * 2) Or FAILOVER on the top of a SYNCH message
-                *
-                * The 2nd case should not happen if peer supports
-                * TIPC_TUNNEL_ENHANCED
-                */
-               if (pktlen > tnl->mtu - INT_H_SIZE) {
-                       if (mtyp == FAILOVER_MSG &&
-                           (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
-                               rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
-                                                      &frags);
-                               if (rc) {
-                                       pr_warn("%sunable to frag msg: rc %d\n",
-                                               link_co_err, rc);
-                                       return;
-                               }
-                               pktcnt += skb_queue_len(&frags) - 1;
-                               pktcnt_need_update = true;
-                               skb_queue_splice_tail_init(&frags, &tnlq);
-                               continue;
-                       }
-                       /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
-                        * => Just warn it and return!
-                        */
-                       pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
-                                           link_co_err, msg_user(hdr),
-                                           msg_type(hdr), msg_size(hdr));
-                       return;
-               }
 
-               msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
-               tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
-               if (!tnlskb) {
-                       pr_warn("%sunable to send packet\n", link_co_err);
-                       return;
-               }
-               skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
-               __skb_queue_tail(&tnlq, tnlskb);
+       /* Assign sequence numbers to the backlog packets, then copy the
+        * contents of both queues into the tunnel queue
+        */
+       inner_seqno = l->snd_nxt;
+       skb_queue_walk(&l->backlogq, skb) {
+               msg_set_seqno(buf_msg(skb), inner_seqno++);
+       }
+       tipc_skb_queue_copy(&l->transmq, &tnlq);
+       tipc_skb_queue_copy(&l->backlogq, &tnlq);
+
+       /* Fragment the buffers if applicable */
+       rc = tipc_skb_queue_fragment(&tnlq, tnl->mtu, &pktcnt, enhanced, mtyp);
+       if (rc) {
+               pr_warn("%sunable to frag msg: rc %d\n", link_co_err, rc);
+               __skb_queue_purge(&tnlq);
+               return;
        }
-       if (queue != &l->backlogq) {
-               queue = &l->backlogq;
-               goto tnl;
+       /* Create reusable tunnel header and prepend it to all packets */
+       tipc_msg_init(self, &hdr, TUNNEL_PROTOCOL, mtyp, INT_H_SIZE, l->addr);
+       msg_set_msgcnt(&hdr, pktcnt);
+       msg_set_bearer_id(&hdr, l->peer_bearer_id);
+       skb_queue_walk(&tnlq, skb) {
+               inner_len = msg_size(buf_msg(skb));
+               skb_push(skb, INT_H_SIZE);
+               msg_set_size(&hdr, inner_len + INT_H_SIZE);
+               skb_copy_to_linear_data(skb, &hdr, INT_H_SIZE);
        }
-
-       if (pktcnt_need_update)
-               skb_queue_walk(&tnlq, skb) {
-                       hdr = buf_msg(skb);
-                       msg_set_msgcnt(hdr, pktcnt);
-               }
-
        tipc_link_xmit(tnl, &tnlq, xmitq);
 
+       /* Prepare for receiving failover packets, if any */
        if (mtyp == FAILOVER_MSG) {
                tnl->drop_point = l->rcv_nxt;
                tnl->failover_reasm_skb = l->reasm_buf;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 0d515d2..812334d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -860,3 +860,53 @@ void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);
 }
+
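+/* tipc_skb_queue_copy(): append a copy of each buffer in 'from' to 'to'
+ * Copying is best effort: it stops at the first allocation failure
+ */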
+void tipc_skb_queue_copy(struct sk_buff_head *from, struct sk_buff_head *to)
+{
+       struct sk_buff *skb, *_skb;
+
+       skb_queue_walk(from, skb) {
+               _skb = pskb_copy(skb, GFP_ATOMIC);
+               if (!_skb)
+                       break;
+               __skb_queue_tail(to, _skb);
+       }
+}
+
+/* tipc_skb_queue_fragment(): fragment packets exceeding the tunnel MTU
+ * A packet may be too large for the tunnel link in two cases:
+ * 1) The tunnel link MTU has just changed or is set differently;
+ * 2) FAILOVER on top of a SYNCH message.
+ * FAILOVER packets can only be fragmented if the peer supports
+ * TIPC_TUNNEL_ENHANCED (frag_supp). Returns 0 on success, otherwise
+ * a negative error code.
+ */
+int tipc_skb_queue_fragment(struct sk_buff_head *skbq, int pktmax,
+                            int *pktcnt, bool frag_supp, int mtyp)
+{
+       struct sk_buff_head frags, tmpq;
+       struct tipc_msg *hdr;
+       struct sk_buff *skb, *tmp;
+       int rc = 0;
+
+       __skb_queue_head_init(&frags);
+       __skb_queue_head_init(&tmpq);
+       skb_queue_splice_tail_init(skbq, &tmpq);
+
+       skb_queue_walk_safe(&tmpq, skb, tmp) {
+               hdr = buf_msg(skb);
+               if (msg_size(hdr) <= pktmax - INT_H_SIZE)
+                       continue;
+               if (mtyp == FAILOVER_MSG && !frag_supp) {
+                       rc = -EMSGSIZE;
+                       goto exit;
+               }
+               /* Leave room for the outer tunnel header on each fragment */
+               rc = tipc_msg_fragment(skb, hdr, pktmax - INT_H_SIZE, &frags);
+               if (rc)
+                       goto exit;
+               *pktcnt += skb_queue_len(&frags) - 1;
+               /* Let the fragments replace the oversized packet */
+               skb_queue_splice_tail_init(&frags, &tmpq);
+               __skb_unlink(skb, &tmpq);
+               kfree_skb(skb);
+       }
+exit:
+       __skb_queue_purge(&frags);
+       skb_queue_splice_tail_init(&tmpq, skbq);
+       return rc;
+}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index e4c13f2..2197f64 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1125,7 +1125,9 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
 void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb);
 bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
-
+void tipc_skb_queue_copy(struct sk_buff_head *from, struct sk_buff_head *to);
+int tipc_skb_queue_fragment(struct sk_buff_head *skbq, int pktmax,
+                            int *pktcnt, bool frag_supp, int mtyp);
+
 static inline u16 buf_seqno(struct sk_buff *skb)
 {
        return msg_seqno(buf_msg(skb));
-- 
2.1.4