From: Jennifer Hunt [EMAIL PROTECTED]
With the initial implementation we failed to implement an skb backlog
queue. The result is that socket receive processing tossed packets.
Since AF_IUCV connections work synchronously, this leads to
connection hangs. Problems with read, close and select also
occurred.
Using an skb backlog queue fixes all of these problems.
Signed-off-by: Frank Pavlic [EMAIL PROTECTED]
---
include/net/iucv/af_iucv.h |2
net/iucv/af_iucv.c | 160 -
2 files changed, 133 insertions(+), 29 deletions(-)
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 04d1abb..f9bd11b 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -28,6 +28,7 @@ enum {
IUCV_LISTEN,
IUCV_SEVERED,
IUCV_DISCONN,
+ IUCV_CLOSING,
IUCV_CLOSED
};
@@ -62,6 +63,7 @@ struct iucv_sock {
struct sock *parent;
struct iucv_path*path;
struct sk_buff_head send_skb_q;
+ struct sk_buff_head backlog_skb_q;
unsigned intsend_tag;
};
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index acc9421..0c2e4a8 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -147,6 +147,7 @@ static void iucv_sock_close(struct sock *sk)
unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
int err;
+ unsigned long timeo;
iucv_sock_clear_timer(sk);
lock_sock(sk);
@@ -159,6 +160,21 @@ static void iucv_sock_close(struct sock *sk)
case IUCV_CONNECTED:
case IUCV_DISCONN:
err = 0;
+
+ sk->sk_state = IUCV_CLOSING;
+ sk->sk_state_change(sk);
+
+ if(!skb_queue_empty(&iucv->send_skb_q)) {
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ timeo = sk->sk_lingertime;
+ else
+ timeo = IUCV_DISCONN_TIMEOUT;
+ err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+ }
+
+ sk->sk_state = IUCV_CLOSED;
+ sk->sk_state_change(sk);
+
if (iucv->path) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
@@ -168,12 +184,11 @@ static void iucv_sock_close(struct sock *sk)
iucv->path = NULL;
}
- sk->sk_state = IUCV_CLOSED;
- sk->sk_state_change(sk);
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->backlog_skb_q);
sock_set_flag(sk, SOCK_ZAPPED);
break;
@@ -204,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock,
int proto, gfp_t prio)
sock_init_data(sock, sk);
INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+ skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
iucv_sk(sk)->send_tag = 0;
sk->sk_destruct = iucv_sock_destruct;
@@ -510,7 +526,7 @@ static int iucv_sock_accept(struct socket *sock, struct
socket *newsock,
long timeo;
int err = 0;
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != IUCV_LISTEN) {
err = -EBADFD;
@@ -530,7 +546,7 @@ static int iucv_sock_accept(struct socket *sock, struct
socket *newsock,
release_sock(sk);
timeo = schedule_timeout(timeo);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != IUCV_LISTEN) {
err = -EBADFD;
@@ -606,7 +622,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct
socket *sock,
if(!(skb = sock_alloc_send_skb(sk, len,
msg->msg_flags & MSG_DONTWAIT,
&err)))
- return err;
+ goto out;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
err = -EFAULT;
@@ -647,10 +663,16 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct
socket *sock,
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
int target, copied = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb, *rskb, *cskb;
int err = 0;
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue))
+ return 0;
+
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
@@ -665,10 +687,12 @@ static int