On Sun, Nov 02, 2014 at 06:07:20PM +0100, Attilio Rao wrote:
> On Sun, Nov 2, 2014 at 5:59 PM, Konstantin Belousov <kostik...@gmail.com> 
> wrote:
> > It is easy and cheap to record the set of the owned lockmgr locks for
> > current thread. I do not believe that we have a situation where more
> > than 3 locks are held in one time. To give it some slack, we can record
> > 8 locks shared-owned and do the check for recursion in LK_CAN_SHARE().
> > If the thread-locks table overflows, we could either panic() or fall
> > back to indiscriminative deadlock avoidance (i.e. relying on the sole
> > td_lk_slocks value).
> 
> I don't think it is going to be cheap (and certainly it is not viable
> for things like sx locks and rwlocks).
sx and rw do not implement exclusive starvation avoidance.

> Checking for the owner chain anytime you acquire the lock is certainly
> not what I would call cheap.

I did not propose to verify the owner chain.  I said that it is easy to
record the locks owned by the current thread, only for the current
thread's own consumption.  Below is the prototype.

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 24d94cc..2b60345 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -75,8 +75,6 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 #define        TD_LOCKS_INC(td)        ((td)->td_locks++)
 #define        TD_LOCKS_DEC(td)        ((td)->td_locks--)
 #endif
-#define        TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
-#define        TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
 
 #ifndef DEBUG_LOCKS
 #define        STACK_PRINT(lk)
@@ -115,11 +113,76 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
        }                                                               \
 } while (0)
 
-#define        LK_CAN_SHARE(x, flags)                                          \
-       (((x) & LK_SHARE) &&                                            \
-       (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
-       (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||  \
-       (curthread->td_pflags & TDP_DEADLKTREAT)))
+static inline bool
+lk_can_share(struct thread *td, void *lk, uintptr_t x, int flags)
+{
+       int i, idx, mask;
+
+       if ((x & LK_SHARE) == 0)
+               return (false);
+       if (td->td_lk_slocks < nitems(td->td_lk_slocksp)) {
+               mask = td->td_lk_smask;
+               for (i = 0; i < td->td_lk_slocks; i++) {
+                       idx = ffs(mask) - 1;
+
+                       /*
+                        * Current thread definitely owns the same
+                        * lock in shared mode already, grant the
+                        * request despite possible presence of the
+                        * exclusive waiters, to avoid deadlocks.
+                        */
+                       if (lk == td->td_lk_slocksp[idx])
+                               return (true);
+
+                       mask &= ~(1 << idx);
+               }
+       } else {
+               /*
+                * All slots filled, fall to the safe side.
+                * XXXKIB: panic instead ?
+                */
+               return (true);
+       }
+       if ((td->td_pflags & TDP_DEADLKTREAT) != 0)
+               return (true);
+       if ((x & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) != 0)
+               return (false);
+       return (true);
+}
+
+static inline void
+lk_slocks_inc(struct thread *td, void *lk)
+{
+       int i;
+
+       if (td->td_lk_slocks < nitems(td->td_lk_slocksp)) {
+               for (i = 0; i < nitems(td->td_lk_slocksp); i++) {
+                       if (td->td_lk_slocksp[i] == NULL) {
+                               td->td_lk_slocksp[i] = lk;
+                               td->td_lk_smask |= 1 << i;
+                               break;
+                       }
+               }
+               KASSERT(i < nitems(td->td_lk_slocksp), ("leaked pointer"));
+       }
+       td->td_lk_slocks++;
+}
+
+static inline void
+lk_slocks_dec(struct thread *td, void *lk)
+{
+       int i;
+
+       for (i = 0; i < nitems(td->td_lk_slocksp); i++) {
+               if (lk == td->td_lk_slocksp[i]) {
+                       td->td_lk_slocksp[i] = NULL;
+                       td->td_lk_smask &= ~(1 << i);
+                       break;
+               }
+       }
+       td->td_lk_slocks--;
+}
+
 #define        LK_TRYOP(x)                                                     \
        ((x) & LK_NOWAIT)
 
@@ -338,7 +402,7 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 
        lock_profile_release_lock(&lk->lock_object);
        TD_LOCKS_DEC(curthread);
-       TD_SLOCKS_DEC(curthread);
+       lk_slocks_dec(curthread, lk);
        return (wakeup_swapper);
 }
 
@@ -531,7 +595,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                         * waiters, if we fail to acquire the shared lock
                         * loop back and retry.
                         */
-                       if (LK_CAN_SHARE(x, flags)) {
+                       if (lk_can_share(curthread, lk, x, flags)) {
                                if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                                    x + LK_ONE_SHARER))
                                        break;
@@ -615,7 +679,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                                                    __func__, lk, spintries, i);
                                        x = lk->lk_lock;
                                        if ((x & LK_SHARE) == 0 ||
-                                           LK_CAN_SHARE(x) != 0)
+                                           lk_can_share(curthread, lk,
+                                           x, flags) != 0)
                                                break;
                                        cpu_spinwait();
                                }
@@ -636,7 +701,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                         * if the lock can be acquired in shared mode, try
                         * again.
                         */
-                       if (LK_CAN_SHARE(x, flags)) {
+                       if (lk_can_share(curthread, lk, x, flags)) {
                                sleepq_release(&lk->lock_object);
                                continue;
                        }
@@ -698,7 +763,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                        WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
                            line);
                        TD_LOCKS_INC(curthread);
-                       TD_SLOCKS_INC(curthread);
+                       lk_slocks_inc(curthread, lk);
                        STACK_SAVE(lk);
                }
                break;
@@ -719,7 +784,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                            line);
                        WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
-                       TD_SLOCKS_DEC(curthread);
+                       lk_slocks_dec(curthread, lk);
                        break;
                }
 
@@ -974,7 +1039,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
                        panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }
-               TD_SLOCKS_INC(curthread);
+               lk_slocks_inc(curthread, lk);
 
                /*
                 * In order to preserve waiters flags, just spin.
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index f2ffab2..e4f9d64 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -390,7 +390,6 @@ compute_cn_lkflags(struct mount *mp, int lkflags, int cnflags)
                lkflags &= ~LK_SHARED;
                lkflags |= LK_EXCLUSIVE;
        }
-       lkflags |= LK_NODDLKTREAT;
        return (lkflags);
 }
 
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index ff0473d..a48523f 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -158,7 +158,6 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
 #define        LK_RETRY        0x000400
 #define        LK_SLEEPFAIL    0x000800
 #define        LK_TIMELOCK     0x001000
-#define        LK_NODDLKTREAT  0x002000
 
 /*
  * Operations for lockmgr().
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index fac0915..4562e1a 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -237,6 +237,8 @@ struct thread {
        short           td_rw_rlocks;   /* (k) Count of rwlock read locks. */
        short           td_lk_slocks;   /* (k) Count of lockmgr shared locks. */
        short           td_stopsched;   /* (k) Scheduler stopped. */
+       void            *td_lk_slocksp[8];/* (k) Shared-owned lockmgr locks. */
+       int             td_lk_smask;    /* (k) Bitmask of slots in use. */
        struct turnstile *td_blocked;   /* (t) Lock thread is blocked on. */
        const char      *td_lockname;   /* (t) Name of lock blocked on. */
        LIST_HEAD(, turnstile) td_contested;    /* (q) Contested locks. */
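
If it helps to see the bookkeeping outside the kernel, below is a small
stand-alone user-space model of the same per-thread table (the names and
the test are made up, this is not the patch itself).  The test at the end
exercises the case where a release leaves a hole in the slot array, which
is why the lookup has to walk the bitmask instead of scanning slots
0..count-1.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <strings.h>		/* ffs() */

#define	NSLOTS	8

struct lk_table {
	void	*slots[NSLOTS];	/* recorded shared-owned locks */
	int	mask;		/* bitmask of slots in use */
	int	count;		/* total shared acquisitions */
};

static void
lk_record(struct lk_table *t, void *lk)
{
	int i;

	if (t->count < NSLOTS) {
		for (i = 0; i < NSLOTS; i++) {
			if (t->slots[i] == NULL) {
				t->slots[i] = lk;
				t->mask |= 1 << i;
				break;
			}
		}
		assert(i < NSLOTS);
	}
	t->count++;
}

static void
lk_forget(struct lk_table *t, void *lk)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (t->slots[i] == lk) {
			t->slots[i] = NULL;
			t->mask &= ~(1 << i);
			break;
		}
	}
	t->count--;
}

static bool
lk_owns_shared(const struct lk_table *t, void *lk)
{
	int i, idx, mask;

	if (t->count >= NSLOTS)
		return (true);	/* table overflowed, fall to the safe side */
	mask = t->mask;
	for (i = 0; i < t->count; i++) {
		idx = ffs(mask) - 1;
		if (t->slots[idx] == lk)
			return (true);
		mask &= ~(1 << idx);
	}
	return (false);
}

int
main(void)
{
	struct lk_table t = { 0 };
	int a, b, c;		/* stand-ins for three lockmgr locks */

	lk_record(&t, &a);
	lk_record(&t, &b);
	lk_record(&t, &c);
	lk_forget(&t, &a);	/* leaves a hole at slot 0 */

	assert(lk_owns_shared(&t, &c));
	assert(!lk_owns_shared(&t, &a));
	return (0);
}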