On Mon, 2013-04-15 at 10:37 -0400, Waiman Long wrote:
[...]
> +typedef struct mspin_node {
> +     struct mspin_node *next;
> +     int                locked;      /* 1 if lock acquired */
> +} mspin_node_t;
> +
> +typedef mspin_node_t *mspin_lock_t;

I think we could do without the typedefs, especially mspin_lock_t.
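Something along these lines, perhaps (just a sketch, assuming
spin_mlock stays a void * inside struct mutex; same logic, bare
struct types only):

struct mspin_node {
	struct mspin_node *next;
	int                locked;	/* 1 if lock acquired */
};

#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline void mspin_lock(struct mspin_node **lock,
				struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/* Queue at the tail; xchg() implies a full memory barrier */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}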

> +
> +#define      MLOCK(mutex)    ((mspin_lock_t *)&((mutex)->spin_mlock))
> +
> +static noinline void mspin_lock(mspin_lock_t *lock,  mspin_node_t *node)
> +{
> +     mspin_node_t *prev;
> +
> +     /* Init node */
> +     node->locked = 0;
> +     node->next   = NULL;
> +
> +     prev = xchg(lock, node);
> +     if (likely(prev == NULL)) {
> +             /* Lock acquired */
> +             node->locked = 1;
> +             return;
> +     }
> +     ACCESS_ONCE(prev->next) = node;
> +     smp_wmb();
> +     /* Wait until the lock holder passes the lock down */
> +     while (!ACCESS_ONCE(node->locked))
> +             arch_mutex_cpu_relax();
> +}
> +
> +static void mspin_unlock(mspin_lock_t *lock,  mspin_node_t *node)
> +{
> +     mspin_node_t *next = ACCESS_ONCE(node->next);
> +
> +     if (likely(!next)) {
> +             /*
> +              * Release the lock by setting it to NULL
> +              */
> +             if (cmpxchg(lock, node, NULL) == node)
> +                     return;
> +             /* Wait until the next pointer is set */
> +             while (!(next = ACCESS_ONCE(node->next)))
> +                     arch_mutex_cpu_relax();
> +     }
> +     barrier();
> +     ACCESS_ONCE(next->locked) = 1;
> +     smp_wmb();

Do we really need the compiler barrier here? The CPUs can reorder
anyway. I assume the smp_wmb() call makes sure there's no funny
business before the next lock is acquired; that might be worth a
comment.
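If that is indeed the intent, spelling it out in the unlock path
would help, e.g. (just a sketch of the comment, reflecting my
reading above):

	/*
	 * Pass the lock to the next waiter. The smp_wmb() orders the
	 * ->locked store before any later stores from this CPU, so
	 * the spinning waiter observes the handoff before anything
	 * the old owner does afterwards.
	 */
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();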

Thanks,
Davidlohr
