* Ingo Molnar <[EMAIL PROTECTED]> wrote:

> (the #ifdef LOCKDEP should probably be converted to some sort of 
> lockdep_split_lock_key(&sk->sk_receive_queue.lock) op - i'll do that 
> later)

I've added such an op, lockdep_reinit_lock_key() — this makes the patch 
cleaner:

----------------------
Subject: undo AF_UNIX _bh locking changes and split lock-type
From: Ingo Molnar <[EMAIL PROTECTED]>

This cleans up lock-validator-special-locking-af_unix.patch: instead
of adding _bh locking to AF_UNIX, this patch splits the AF_UNIX
sk_receive_queue.lock type off from the other networking skb-queue locks.

Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
---
 net/unix/af_unix.c |   18 ++++++++++++++----
 net/unix/garbage.c |    8 ++++----
 2 files changed, 18 insertions(+), 8 deletions(-)

Index: linux/net/unix/af_unix.c
===================================================================
--- linux.orig/net/unix/af_unix.c
+++ linux/net/unix/af_unix.c
@@ -542,6 +542,14 @@ static struct proto unix_proto = {
        .obj_size = sizeof(struct unix_sock),
 };
 
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * dont trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-type by reinitializing the spinlock key:
+ */
+static struct lockdep_type_key af_unix_sk_receive_queue_lock_key;
+
 static struct sock * unix_create1(struct socket *sock)
 {
        struct sock *sk = NULL;
@@ -557,6 +565,8 @@ static struct sock * unix_create1(struct
        atomic_inc(&unix_nr_socks);
 
        sock_init_data(sock,sk);
+       lockdep_reinit_lock_key(&sk->sk_receive_queue.lock,
+                               &af_unix_sk_receive_queue_lock_key);
 
        sk->sk_write_space      = unix_write_space;
        sk->sk_max_ack_backlog  = sysctl_unix_max_dgram_qlen;
@@ -1073,12 +1083,12 @@ restart:
        unix_state_wunlock(sk);
 
        /* take ten and and send info to listening sock */
-       spin_lock_bh(&other->sk_receive_queue.lock);
+       spin_lock(&other->sk_receive_queue.lock);
        __skb_queue_tail(&other->sk_receive_queue, skb);
        /* Undo artificially decreased inflight after embrion
         * is installed to listening socket. */
        atomic_inc(&newu->inflight);
-       spin_unlock_bh(&other->sk_receive_queue.lock);
+       spin_unlock(&other->sk_receive_queue.lock);
        unix_state_runlock(other);
        other->sk_data_ready(other, 0);
        sock_put(other);
@@ -1843,7 +1853,7 @@ static int unix_ioctl(struct socket *soc
                                break;
                        }
 
-                       spin_lock_bh(&sk->sk_receive_queue.lock);
+                       spin_lock(&sk->sk_receive_queue.lock);
                        if (sk->sk_type == SOCK_STREAM ||
                            sk->sk_type == SOCK_SEQPACKET) {
                                skb_queue_walk(&sk->sk_receive_queue, skb)
@@ -1853,7 +1863,7 @@ static int unix_ioctl(struct socket *soc
                                if (skb)
                                        amount=skb->len;
                        }
-                       spin_unlock_bh(&sk->sk_receive_queue.lock);
+                       spin_unlock(&sk->sk_receive_queue.lock);
                        err = put_user(amount, (int __user *)arg);
                        break;
                }
Index: linux/net/unix/garbage.c
===================================================================
--- linux.orig/net/unix/garbage.c
+++ linux/net/unix/garbage.c
@@ -235,7 +235,7 @@ void unix_gc(void)
                struct sock *x = pop_stack();
                struct sock *sk;
 
-               spin_lock_bh(&x->sk_receive_queue.lock);
+               spin_lock(&x->sk_receive_queue.lock);
                skb = skb_peek(&x->sk_receive_queue);
                
                /*
@@ -270,7 +270,7 @@ void unix_gc(void)
                                maybe_unmark_and_push(skb->sk);
                        skb=skb->next;
                }
-               spin_unlock_bh(&x->sk_receive_queue.lock);
+               spin_unlock(&x->sk_receive_queue.lock);
                sock_put(x);
        }
 
@@ -283,7 +283,7 @@ void unix_gc(void)
                if (u->gc_tree == GC_ORPHAN) {
                        struct sk_buff *nextsk;
 
-                       spin_lock_bh(&s->sk_receive_queue.lock);
+                       spin_lock(&s->sk_receive_queue.lock);
                        skb = skb_peek(&s->sk_receive_queue);
                        while (skb &&
                               skb != (struct sk_buff *)&s->sk_receive_queue) {
@@ -298,7 +298,7 @@ void unix_gc(void)
                                }
                                skb = nextsk;
                        }
-                       spin_unlock_bh(&s->sk_receive_queue.lock);
+                       spin_unlock(&s->sk_receive_queue.lock);
                }
                u->gc_tree = GC_ORPHAN;
        }

-
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to