On Wed, Dec 20, 2017 at 12:17:27PM +0100, Martin Pieuchot wrote:
> On 15/12/17(Fri) 22:03, Mateusz Guzik wrote:
> > > +void
> > > +__mtx_enter(struct mutex *mtx)
> > > +{
> > > +#ifdef MP_LOCKDEBUG
> > > +     int nticks = __mp_lock_spinout;
> > > +#endif
> > > +
> > > +     while (__mtx_enter_try(mtx) == 0) {
> > > +             CPU_BUSY_CYCLE();
> >
> > So this is effectively __mtx_enter_try with a single pause in between.
> >
> > > +}
> > > +
> > > +int
> > > +__mtx_enter_try(struct mutex *mtx)
> > > +{
> > > +     struct cpu_info *owner, *ci = curcpu();
> > > +     int s;
> > > +
> > > +     if (mtx->mtx_wantipl != IPL_NONE)
> > > +             s = splraise(mtx->mtx_wantipl);
> > > +
> >
> > This is at least one read.
> >
> > > +     owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
> > > +#ifdef DIAGNOSTIC
> > > +     if (__predict_false(owner == ci))
> > > +             panic("mtx %p: locking against myself", mtx);
> > > +#endif
> > > +     if (owner == NULL) {
> > > +             membar_enter_after_atomic();
> > > +             if (mtx->mtx_wantipl != IPL_NONE)
> > > +                     mtx->mtx_oldipl = s;
> >
> > This repeats the read done earlier.
> >
> > Since the caller loops, this is effectively a very inefficient lock of
> > the form:
> > while (!atomic_cas_ptr(...))
> >         CPU_BUSY_CYCLE();
> >
> > + some reads in-between
> >
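
Right, so each failed attempt re-reads mtx_wantipl and issues another
CAS, which keeps the lock's cache line bouncing between CPUs. The usual
cure is test-and-test-and-set: spin on a plain read and only retry the
atomic op once the lock looks free. A minimal sketch of the idiom in
C11 (hypothetical spinlock type, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct spin_lock {
		atomic_bool locked;
	};

	static void
	spin_lock_acquire(struct spin_lock *l)
	{
		for (;;) {
			/* The expensive atomic op, attempted sparingly. */
			if (!atomic_exchange_explicit(&l->locked, true,
			    memory_order_acquire))
				return;
			/* Contended: wait on cheap plain reads. */
			while (atomic_load_explicit(&l->locked,
			    memory_order_relaxed))
				;	/* a pause instruction belongs here */
		}
	}

	static void
	spin_lock_release(struct spin_lock *l)
	{
		atomic_store_explicit(&l->locked, false,
		    memory_order_release);
	}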

So how about the patch below? It booted fine and survived a make -j 8
kernel build. Unfortunately the vm in question is running in too
volatile an environment for any kind of performance testing at the
moment, but the impact should be minor anyway.

Note this is still a very basic lock with plenty of room for
improvement. I'm not committing to any kind of work in the area, but I
may contribute other code in the future if changes of this sort are
fine with you.
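
Condensed, with the IPL and MP_LOCKDEBUG/DIAGNOSTIC handling elided,
the locking path after the patch boils down to:

	void
	__mtx_enter(struct mutex *mtx)
	{
		struct cpu_info *ci = curcpu();
		int wantipl = mtx->mtx_wantipl;	/* read once up front */

		while (__mtx_enter_common(mtx, wantipl, ci) == 0) {
			/*
			 * Spin on a plain read of the owner field and
			 * retry the CAS only once the lock looks free.
			 */
			do {
				CPU_BUSY_CYCLE();
			} while (mtx->mtx_owner != NULL);
		}
	}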

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 64c5a1f6319..5c14f2b6720 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -253,34 +253,14 @@ __mtx_init(struct mutex *mtx, int wantipl)
 }

 #ifdef MULTIPROCESSOR
-void
-__mtx_enter(struct mutex *mtx)
-{
-#ifdef MP_LOCKDEBUG
-       int nticks = __mp_lock_spinout;
-#endif
-
-       while (__mtx_enter_try(mtx) == 0) {
-               CPU_BUSY_CYCLE();
-
-#ifdef MP_LOCKDEBUG
-               if (--nticks == 0) {
-                       db_printf("%s: %p lock spun out", __func__, mtx);
-                       db_enter();
-                       nticks = __mp_lock_spinout;
-               }
-#endif
-       }
-}
-
 int
-__mtx_enter_try(struct mutex *mtx)
+__mtx_enter_common(struct mutex *mtx, int wantipl, struct cpu_info *ci)
 {
-       struct cpu_info *owner, *ci = curcpu();
+       struct cpu_info *owner;
        int s;

-       if (mtx->mtx_wantipl != IPL_NONE)
-               s = splraise(mtx->mtx_wantipl);
+       if (wantipl != IPL_NONE)
+               s = splraise(wantipl);

        owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
 #ifdef DIAGNOSTIC
@@ -289,7 +269,7 @@ __mtx_enter_try(struct mutex *mtx)
 #endif
        if (owner == NULL) {
                membar_enter_after_atomic();
-               if (mtx->mtx_wantipl != IPL_NONE)
+               if (wantipl != IPL_NONE)
                        mtx->mtx_oldipl = s;
 #ifdef DIAGNOSTIC
                ci->ci_mutex_level++;
@@ -297,11 +277,49 @@ __mtx_enter_try(struct mutex *mtx)
                return (1);
        }

-       if (mtx->mtx_wantipl != IPL_NONE)
+       if (wantipl != IPL_NONE)
                splx(s);

        return (0);
 }
+
+void
+__mtx_enter(struct mutex *mtx)
+{
+       struct cpu_info *ci = curcpu();
+       int wantipl = mtx->mtx_wantipl;
+#ifdef MP_LOCKDEBUG
+       int nticks = __mp_lock_spinout;
+#endif
+
+       for (;;) {
+               if (__mtx_enter_common(mtx, wantipl, ci))
+                       break;
+
+               for (;;) {
+#ifdef MP_LOCKDEBUG
+                       if (--nticks == 0) {
+                       db_printf("%s: %p lock spun out", __func__, mtx);
+                               db_enter();
+                               nticks = __mp_lock_spinout;
+                       }
+#endif
+                       CPU_BUSY_CYCLE();
+                       if (mtx->mtx_owner == NULL)
+                               break;
+               }
+       }
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+       struct cpu_info *ci = curcpu();
+       int wantipl = mtx->mtx_wantipl;
+
+       return (__mtx_enter_common(mtx, wantipl, ci));
+}
+
 #else
 void
 __mtx_enter(struct mutex *mtx)

-- 
Mateusz Guzik <mjguzik gmail.com>
