Main changes:
1. When a close/shutdown syscall is called, instead of sending a DREQ,
putting the last socket ref count, and going to the TCP_CLOSE state, do the
following (a simplified sketch appears after this change list):
- take a socket reference count
- set state to TCP_TIME_WAIT
- start InfiniBand tear down
- wait until RDMA_CM_EVENT_TIMEWAIT_EXIT is received
- set socket state to TCP_CLOSE
- put the last socket ref count - this will call sdp_destruct()

2. Use fin_wait_timeout to time out a half-closed connection when the peer
didn't respond with SDP_MID_DISCONNECT.

3. No need for sdp_time_wait

4. An abortive close will immediately start InfiniBand tear down - the socket
close will be finalized when the CM finishes.

5. sdp_post_sends used to send SDP_MID_DISCONNECT based on the socket state,
so multiple DISCONNECTs could be sent - changed it to be triggered by a flag.
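
A simplified sketch of the new close flow from item 1 (illustrative only, not
code from this patch; the *_sketch helper names are made up, and in the patch
the TIMEWAIT_EXIT handling actually goes through sdp_disconnected_handler()
and destroy_work):

        /* Sketch: close()/shutdown() holds a reference and starts the
         * InfiniBand tear down instead of sending a DREQ and dropping
         * straight to TCP_CLOSE.
         */
        static void sdp_close_sketch(struct sock *sk)
        {
                sock_hold(sk);                          /* take a reference */
                sdp_set_state(sk, TCP_TIME_WAIT);
                if (sdp_sk(sk)->id)
                        rdma_disconnect(sdp_sk(sk)->id); /* start IB tear down */
                /* ... wait for RDMA_CM_EVENT_TIMEWAIT_EXIT ... */
        }

        /* Sketch: the CM event handler finishes the close. */
        static void sdp_timewait_exit_sketch(struct sock *sk)
        {
                sdp_set_state(sk, TCP_CLOSE);
                sock_put(sk);           /* last ref - calls sdp_destruct() */
        }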

Signed-off-by: Amir Vadai <[EMAIL PROTECTED]>
---
 drivers/infiniband/ulp/sdp/sdp.h       |   20 +++---
 drivers/infiniband/ulp/sdp/sdp_bcopy.c |  137 +++++++++++++++++++++++++-------
 drivers/infiniband/ulp/sdp/sdp_cma.c   |   10 ++-
 drivers/infiniband/ulp/sdp/sdp_main.c  |   97 +++++++++++++----------
 4 files changed, 180 insertions(+), 84 deletions(-)

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 5bd4041..3b2dffb 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -125,7 +125,7 @@ struct sdp_sock {
        int xmit_size_goal;
        int nonagle;
 
-       int time_wait;
+       int fin_wait_timeout;
 
        unsigned keepalive_time;
 
@@ -133,6 +133,8 @@ struct sdp_sock {
        unsigned keepalive_tx_head;
        unsigned keepalive_rx_head;
 
+       int sdp_disconnect;
+
        /* Data below will be reset on error */
        /* rdma specific */
        struct rdma_cm_id *id;
@@ -227,15 +229,12 @@ static inline void sdp_set_error(struct sock *sk, int err)
 {
        sk->sk_err = -err;
        if (sk->sk_socket)
-               sk->sk_socket->state = SS_UNCONNECTED;
-
-       sdp_set_state(sk, TCP_CLOSE);
+               sk->sk_socket->state = SS_DISCONNECTING;
 
-       if (sdp_sk(sk)->time_wait) {
-               sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
-               sdp_sk(sk)->time_wait = 0;
-               queue_work(sdp_workqueue, &sdp_sk(sk)->destroy_work);
-       }
+       if (sk->sk_state == TCP_SYN_SENT)
+               sdp_set_state(sk, TCP_CLOSE);
+       else
+               sdp_set_state(sk, TCP_TIME_WAIT);
 
        sk->sk_error_report(sk);
 }
@@ -245,7 +244,6 @@ extern struct workqueue_struct *sdp_workqueue;
 int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
 void sdp_reset(struct sock *sk);
 void sdp_reset_sk(struct sock *sk, int rc);
-void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
 void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
 void sdp_work(struct work_struct *work);
 int sdp_post_credits(struct sdp_sock *ssk);
@@ -254,6 +252,8 @@ void sdp_post_recvs(struct sdp_sock *ssk);
 int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq);
 void sdp_post_sends(struct sdp_sock *ssk, int nonagle);
 void sdp_destroy_work(struct work_struct *work);
+void sdp_cancel_fin_wait_timeout(struct sdp_sock *ssk);
+void sdp_fin_work(struct work_struct *work);
 void sdp_time_wait_work(struct work_struct *work);
 struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id);
 struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq);
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 3a8d5ac..d3f50b5 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -96,7 +96,7 @@ void sdp_remove_large_sock(struct sdp_sock *ssk)
        }
 }
 
-/* Like tcp_fin */
+/* Like tcp_fin - called when SDP_MID_DISCONNECT is received */
 static void sdp_fin(struct sock *sk)
 {
        sdp_dbg(sk, "%s\n", __func__);
@@ -104,6 +104,42 @@ static void sdp_fin(struct sock *sk)
        sk->sk_shutdown |= RCV_SHUTDOWN;
        sock_set_flag(sk, SOCK_DONE);
 
+       switch (sk->sk_state) {
+       case TCP_SYN_RECV:
+       case TCP_ESTABLISHED:
+               sdp_set_state(sk, TCP_CLOSE_WAIT);
+               break;
+
+       case TCP_FIN_WAIT1:
+               /* This case occurs when a simultaneous close
+                * happens, we must ack the received FIN and
+                * enter the CLOSING state.
+                */
+               sdp_set_state(sk, TCP_CLOSING);
+               break;
+
+       case TCP_FIN_WAIT2:
+               /* Received a reply FIN - start Infiniband tear down */
+               sdp_set_state(sk, TCP_TIME_WAIT);
+               sdp_dbg(sk, "%s: Starting Infiniband tear down sending DREQ\n",
+                               __func__);
+
+               if (sdp_sk(sk)->id) {
+                       rdma_disconnect(sdp_sk(sk)->id);
+               } else {
+                       sdp_warn(sk, "%s: sdp_sk(sk)->id is NULL\n", __func__);
+                       BUG();
+               }
+               break;
+       case TCP_TIME_WAIT:
+       case TCP_CLOSE:
+               break;
+       default:
+               sdp_warn(sk, "%s: FIN in unexpected state. sk->sk_state=%d\n",
+                               __func__, sk->sk_state);
+               break;
+       }
+
 
        sk_mem_reclaim(sk);
 
@@ -524,7 +560,9 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 
        if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
            likely(ssk->bufs > 1) &&
-           likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+           likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+           likely((1 << ssk->isk.sk.sk_state) &
+                   (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
                                          GFP_KERNEL);
@@ -533,20 +571,16 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                sdp_post_send(ssk, skb, SDP_MID_DATA);
        }
 
-       if (unlikely((1 << ssk->isk.sk.sk_state) &
-                       (TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+       if (unlikely(ssk->sdp_disconnect) &&
                !ssk->isk.sk.sk_send_head &&
                ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+               ssk->sdp_disconnect = 0;
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
                                          gfp_page);
                /* FIXME */
                BUG_ON(!skb);
                sdp_post_send(ssk, skb, SDP_MID_DISCONN);
-               if (ssk->isk.sk.sk_state == TCP_FIN_WAIT1)
-                       sdp_set_state(&ssk->isk.sk, TCP_FIN_WAIT2);
-               else
-                       sdp_set_state(&ssk->isk.sk, TCP_CLOSING);
        }
 }
 
@@ -590,6 +624,7 @@ static void sdp_handle_resize_ack(struct sdp_sock *ssk, struct sdp_chrecvbuf *bu
 
 static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
 {
+       struct sock *sk = &ssk->isk.sk;
        int frags;
        struct sk_buff *skb;
        struct sdp_bsdh *h;
@@ -603,16 +638,15 @@ static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
 
        if (unlikely(wc->status)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR) {
-                       sdp_dbg(&ssk->isk.sk,
-                                       "Recv completion with error. "
-                                       "Status %d\n", wc->status);
-                       sdp_reset(&ssk->isk.sk);
+                       sdp_dbg(sk, "Recv completion with error. Status %d\n",
+                               wc->status);
+                       sdp_reset(sk);
                }
                __kfree_skb(skb);
                return 0;
        }
 
-       sdp_dbg_data(&ssk->isk.sk, "Recv completion. ID %d Length %d\n",
+       sdp_dbg_data(sk, "Recv completion. ID %d Length %d\n",
                        (int)wc->wr_id, wc->byte_len);
        if (unlikely(wc->byte_len < sizeof(struct sdp_bsdh))) {
                printk(KERN_WARNING "SDP BUG! byte_len %d < %zd\n",
@@ -651,7 +685,7 @@ static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
        }
 
        if (unlikely(h->flags & SDP_OOB_PEND))
-               sk_send_sigurg(&ssk->isk.sk);
+               sk_send_sigurg(sk);
 
        skb_pull(skb, sizeof(struct sdp_bsdh));
 
@@ -661,21 +695,36 @@ static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
                        __kfree_skb(skb);
                        break;
                }
-               skb = sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
+
+               if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+                       /* got data in RCV_SHUTDOWN */
+                       if ((1 << sk->sk_state) &
+                                       (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2)) {
+                               /* go into abortive close */
+                               sdp_set_state(sk, TCP_TIME_WAIT);
+
+                               sk->sk_prot->disconnect(sk, 0);
+                       }
+
+                       __kfree_skb(skb);
+                       break;
+               }
+               skb = sdp_sock_queue_rcv_skb(sk, skb);
                if (unlikely(h->flags & SDP_OOB_PRES))
                        sdp_urg(ssk, skb);
                break;
        case SDP_MID_DISCONN:
-               /* this will wake recvmsg */
-               sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
-               sdp_fin(&ssk->isk.sk);
+               if (ssk->fin_wait_timeout)
+                       sdp_cancel_fin_wait_timeout(ssk);
+
+               __kfree_skb(skb);
+               sdp_fin(sk);
                break;
        case SDP_MID_CHRCVBUF:
                sdp_handle_resize_request(ssk,
-                               (struct sdp_chrecvbuf *)skb->data);
+                       (struct sdp_chrecvbuf *)skb->data);
                __kfree_skb(skb);
                break;
-
        case SDP_MID_CHRCVBUF_ACK:
                sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)skb->data);
                __kfree_skb(skb);
@@ -685,27 +734,62 @@ static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
                printk(KERN_WARNING "SDP: FIXME MID %d\n", h->mid);
                __kfree_skb(skb);
        }
+
        return 0;
 }
 
 static int sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
 {
        struct sk_buff *skb;
+       struct sdp_bsdh *h;
 
        skb = sdp_send_completion(ssk, wc->wr_id);
        if (unlikely(!skb))
                return -1;
-       sk_wmem_free_skb(&ssk->isk.sk, skb);
+
        if (unlikely(wc->status)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR) {
-                       sdp_dbg(&ssk->isk.sk,
-                                       "Send completion with error. "
+                       sdp_dbg(&ssk->isk.sk, "Send completion with error. "
                                        "Status %d\n", wc->status);
                        sdp_set_error(&ssk->isk.sk, -ECONNRESET);
                        wake_up(&ssk->wq);
                }
+               goto out;
        }
 
+       h = (struct sdp_bsdh *)skb->data;
+
+       if (likely(h->mid != SDP_MID_DISCONN))
+               goto out;
+
+       switch (ssk->isk.sk.sk_state) {
+       case TCP_FIN_WAIT1:
+               /* sdp_set_state(&ssk->isk.sk,
+                  TCP_FIN_WAIT2); */
+               break;
+       case TCP_CLOSING:
+       case TCP_LAST_ACK:
+               sdp_set_state(&ssk->isk.sk, TCP_TIME_WAIT);
+
+               sdp_dbg(&ssk->isk.sk,
+                       "%s: waiting for Infiniband tear down\n", __func__);
+
+               /* We will not issue a DREQ here - because DREQ is sent on
+                  the CM QP - and could reach the peer before the
+                  SDP_MID_DISCONNECT. That would put the peer in abortive
+                  close state - without any real reason.
+                  TODO: maybe a timeout should be used here - what if the
+                  peer won't send a DREQ? */
+               break;
+       default:
+               sdp_dbg(&ssk->isk.sk,
+                       "%s: sent DISCONNECT from unexpected state %d\n",
+                       __func__, ssk->isk.sk.sk_state);
+       }
+
+out:
+       sk_wmem_free_skb(&ssk->isk.sk, skb);
+
        return 0;
 }
 
@@ -734,13 +818,6 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
 
                return;
        }
-
-       if (ssk->time_wait && !ssk->isk.sk.sk_send_head &&
-           ssk->tx_head == ssk->tx_tail) {
-               sdp_dbg(&ssk->isk.sk, "%s: destroy in time wait state\n",
-                       __func__);
-               sdp_time_wait_destroy_sk(ssk);
-       }
 }
 
 void sdp_completion_handler(struct ib_cq *cq, void *cq_context)
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index f2fb083..8759bac 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -498,9 +498,17 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                        ((struct sockaddr_in *)&id->route.addr.src_addr)->sin_addr.s_addr;
                rc = sdp_connected_handler(sk, event);
                break;
-       case RDMA_CM_EVENT_DISCONNECTED:
+       case RDMA_CM_EVENT_DISCONNECTED: /* This means DREQ/DREP received */
                sdp_dbg(sk, "RDMA_CM_EVENT_DISCONNECTED\n");
                rdma_disconnect(id);
+
+               if (sk->sk_state != TCP_TIME_WAIT) {
+                       sdp_set_error(sk, EPIPE);
+                       rc = sdp_disconnected_handler(sk);
+               }
+               break;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+               sdp_dbg(sk, "RDMA_CM_EVENT_TIMEWAIT_EXIT\n");
                rc = sdp_disconnected_handler(sk);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index acd3aab..960ec90 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -342,10 +342,7 @@ void sdp_reset_sk(struct sock *sk, int rc)
 
        memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
 
-       if (ssk->time_wait) {
-               sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
-               sdp_time_wait_destroy_sk(ssk);
-       }
+       queue_work(sdp_workqueue, &ssk->destroy_work);
 
        sk->sk_state_change(sk);
 
@@ -436,9 +433,10 @@ done:
        sdp_dbg(sk, "%s done\n", __func__);
 }
 
-static void sdp_send_active_reset(struct sock *sk, gfp_t priority)
+static void sdp_send_disconnect(struct sock *sk)
 {
-       sk->sk_prot->disconnect(sk, 0);
+       sdp_sk(sk)->sdp_disconnect = 1;
+       sdp_post_sends(sdp_sk(sk), 0);
 }
 
 /*
@@ -452,7 +450,7 @@ static int sdp_close_state(struct sock *sk)
                return 0;
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               sdp_set_state(sk, TCP_FIN_WAIT1);
+               sdp_set_state(sk, TCP_FIN_WAIT2); /* should be TCP_FIN_WAIT1 */
        else if (sk->sk_state == TCP_CLOSE_WAIT)
                sdp_set_state(sk, TCP_LAST_ACK);
        else
@@ -466,7 +464,13 @@ static void sdp_close(struct sock *sk, long timeout)
        struct sk_buff *skb;
        int data_was_unread = 0;
 
+       if ((1 << sk->sk_state) & (TCPF_TIME_WAIT | TCPF_CLOSE)) {
+               sock_put(sk);
+               return;
+       }
+
        lock_sock(sk);
+       sock_hold(sk);
 
        sdp_dbg(sk, "%s\n", __func__);
 
@@ -504,18 +508,23 @@ static void sdp_close(struct sock *sk, long timeout)
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
-               sdp_set_state(sk, TCP_CLOSE);
-               sdp_send_active_reset(sk, GFP_KERNEL);
+               sdp_set_state(sk, TCP_TIME_WAIT);
+
+               /* Go into abortive close */
+               sk->sk_prot->disconnect(sk, 0);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
-               sk->sk_prot->disconnect(sk, 0);
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+               sdp_set_state(sk, TCP_TIME_WAIT);
+
+               /* Go into abortive close */
+               sk->sk_prot->disconnect(sk, 0);
        } else if (sdp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
                 */
 
-               sdp_post_sends(sdp_sk(sk), 0);
+               sdp_send_disconnect(sk);
        }
 
        /* TODO: state should move to CLOSE or CLOSE_WAIT etc on disconnect.
@@ -533,7 +542,6 @@ adjudge_to_death:
         */
        lock_sock(sk);
 
-       sock_hold(sk);
        sock_orphan(sk);
 
        /*      This is a (useful) BSD violating of the RFC. There is a
@@ -549,15 +557,8 @@ adjudge_to_death:
         *      consume significant resources. Let's do it with special
         *      linger2 option.                                 --ANK
         */
-
-       if (sk->sk_state == TCP_FIN_WAIT2 &&
-               !sk->sk_send_head &&
-               sdp_sk(sk)->tx_head == sdp_sk(sk)->tx_tail) {
-               sdp_set_state(sk, TCP_CLOSE);
-       }
-
        if ((1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2)) {
-               sdp_sk(sk)->time_wait = 1;
+               sdp_sk(sk)->fin_wait_timeout = 1;
                /* TODO: liger2 unimplemented.
                   We should wait 3.5 * rto. How do I know rto? */
                /* TODO: tcp_fin_time to get timeout */
@@ -566,18 +567,20 @@ adjudge_to_death:
                atomic_inc(sk->sk_prot->orphan_count);
                queue_delayed_work(sdp_workqueue, &sdp_sk(sk)->time_wait_work,
                                   TCP_FIN_TIMEOUT);
-               goto out;
        }
 
        /* TODO: limit number of orphaned sockets.
           TCP has sysctl_tcp_mem and sysctl_tcp_max_orphans */
-       sock_put(sk);
 
-       /* Otherwise, socket is reprieved until protocol close. */
-out:
-       sdp_dbg(sk, "%s: last socket put %d\n", __func__,
-               atomic_read(&sk->sk_refcnt));
+       if (sk->sk_state != TCP_CLOSE) {
+               sdp_dbg(sk, "Hold socket till end of Infiniband tear-down\n");
+               /* Hold socket till end of Infiniband tear-down */
+               sock_hold(sk);
+       }
+
        release_sock(sk);
+
+       sock_put(sk);
        sk_common_release(sk);
 }
 
@@ -635,11 +638,13 @@ static int sdp_disconnect(struct sock *sk, int flags)
        struct rdma_cm_id *id;
 
        sdp_dbg(sk, "%s\n", __func__);
-       if (ssk->id)
-               rc = rdma_disconnect(ssk->id);
 
-       if (old_state != TCP_LISTEN)
+       if (old_state != TCP_LISTEN) {
+               if (ssk->id)
+                       rc = rdma_disconnect(ssk->id);
+
                return rc;
+       }
 
        sdp_set_state(sk, TCP_CLOSE);
        id = ssk->id;
@@ -827,14 +832,23 @@ static int sdp_ioctl(struct sock *sk, int cmd, unsigned long arg)
        return put_user(answ, (int __user *)arg); 
 }
 
+void sdp_cancel_fin_wait_timeout(struct sdp_sock *ssk)
+{
+       ssk->fin_wait_timeout = 0;
+       cancel_delayed_work(&ssk->time_wait_work);
+       atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
+}
+
 void sdp_destroy_work(struct work_struct *work)
 {
        struct sdp_sock *ssk = container_of(work, struct sdp_sock, destroy_work);
        struct sock *sk = &ssk->isk.sk;
        sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
 
-       cancel_delayed_work(&sdp_sk(sk)->time_wait_work);
-       atomic_dec(sk->sk_prot->orphan_count);
+       if (ssk->fin_wait_timeout)
+               sdp_cancel_fin_wait_timeout(ssk);
+
+       sdp_set_state(sk, TCP_CLOSE);
 
        sock_put(sk);
 }
@@ -846,26 +860,20 @@ void sdp_time_wait_work(struct work_struct *work)
        lock_sock(sk);
        sdp_dbg(sk, "%s\n", __func__);
 
-       if (!sdp_sk(sk)->time_wait) {
+       if (!sdp_sk(sk)->fin_wait_timeout) {
                release_sock(sk);
                return;
        }
 
        sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
 
-       sdp_set_state(sk, TCP_CLOSE);
-       sdp_sk(sk)->time_wait = 0;
+       sdp_sk(sk)->fin_wait_timeout = 0;
        release_sock(sk);
 
        atomic_dec(sk->sk_prot->orphan_count);
-       sock_put(sk);
-}
 
-void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
-{
-       ssk->time_wait = 0;
-       sdp_set_state(&ssk->isk.sk, TCP_CLOSE);
-       queue_work(sdp_workqueue, &ssk->destroy_work);
+       if (sdp_sk(sk)->id)
+               rdma_disconnect(sdp_sk(sk)->id);
 }
 
 static int sdp_init_sock(struct sock *sk)
@@ -880,6 +888,9 @@ static int sdp_init_sock(struct sock *sk)
        INIT_WORK(&ssk->destroy_work, sdp_destroy_work);
 
        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
+
+       ssk->sdp_disconnect = 0;
+
        return 0;
 }
 
@@ -895,7 +906,7 @@ static void sdp_shutdown(struct sock *sk, int how)
                return;
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               sdp_set_state(sk, TCP_FIN_WAIT1);
+               sdp_set_state(sk, TCP_FIN_WAIT2); /* should be TCP_FIN_WAIT1 */
        else if (sk->sk_state == TCP_CLOSE_WAIT)
                sdp_set_state(sk, TCP_LAST_ACK);
        else
@@ -910,7 +921,7 @@ static void sdp_shutdown(struct sock *sk, int how)
        if (ssk->nonagle & TCP_NAGLE_OFF)
                ssk->nonagle |= TCP_NAGLE_PUSH;
 
-       sdp_post_sends(ssk, 0);
+       sdp_send_disconnect(sk);
 }
 
 static void sdp_mark_push(struct sdp_sock *ssk, struct sk_buff *skb)
-- 
1.5.3
