Hi,

I just did the lock-free stack, which ended up being much simpler than the queue
because there is no need for a dummy head pointer. Comments are welcome.

Even though I have not done formal verification of the queue and stack, I feel
sufficiently confident to push them into the urcu mainline. I'll wait for feedback
before cutting a release, though. I also created test_urcu_lfq and test_urcu_lfs,
which will also be in the tree. They perform heavy enqueue/dequeue and push/pop
operations to stress-test the algorithms, and they check that the numbers of
operations (e.g. push vs. pop) balance.

Thanks,

Mathieu


/*
 * rculfstack.h
 *
 * Userspace RCU library - Lock-Free RCU Stack
 *
 * Copyright 2010 - Mathieu Desnoyers <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Stack node. Intended to be embedded in a user-defined structure
 * (intrusive list style); the stack links nodes through "next".
 * "next" is written by rcu_lfs_push() before publication, so callers
 * need not initialize it themselves.
 */
struct rcu_lfs_node {
        struct rcu_lfs_node *next;
};

/*
 * Lock-free stack. "head" points to the top node; NULL means the
 * stack is empty (see rcu_lfs_init() / rcu_lfs_pop()).
 */
struct rcu_lfs_stack {
        struct rcu_lfs_node *head;
};

/*
 * Initialize a node before first use. Currently a no-op: "next" is
 * overwritten by rcu_lfs_push() before the node is published, so no
 * per-node state needs setup. Presumably kept in the API so a future
 * implementation can add node state without changing callers.
 */
void rcu_lfs_node_init(struct rcu_lfs_node *node)
{
}

void rcu_lfs_init(struct rcu_lfs_stack *s)
{
        s->head = NULL;
}

/*
 * Push a node on top of the stack (lock-free, retried CAS loop).
 * The node must not already be in the stack.
 *
 * NOTE(review): the RCU read-side critical section brackets the
 * rcu_dereference() of s->head; whether push strictly requires it
 * (push never dereferences the old head) is worth confirming against
 * the pop-side reclamation scheme.
 */
void rcu_lfs_push(struct rcu_lfs_stack *s, struct rcu_lfs_node *node)
{
        rcu_read_lock();
        for (;;) {
                /* Snapshot the current top of the stack. */
                struct rcu_lfs_node *head = rcu_dereference(s->head);

                node->next = head;
                /*
                 * uatomic_cmpxchg() implicit memory barrier orders earlier
                 * stores to node before publication.
                 */
                if (uatomic_cmpxchg(&s->head, head, node) == head) {
                        /* Successfully published node as the new top. */
                        rcu_read_unlock();
                        return;
                } else {
                        /* Failure to prepend. Retry. */
                        continue;
                }
        }
}

/*
 * Pop the top node from the stack, or return NULL if the stack is
 * empty (lock-free, retried CAS loop).
 *
 * The read of head->next is protected by the RCU read-side critical
 * section: a concurrent pop may remove "head", and RCU keeps the node
 * accessible so the CAS on a stale snapshot simply fails and retries.
 * NOTE(review): this implies the caller must wait for a grace period
 * (e.g. synchronize_rcu() or call_rcu()) before freeing or reusing the
 * returned node — confirm against the library's reclamation contract.
 */
struct rcu_lfs_node *
rcu_lfs_pop(struct rcu_lfs_stack *s)
{
        rcu_read_lock();
        for (;;) {
                /* Snapshot the current top of the stack. */
                struct rcu_lfs_node *head = rcu_dereference(s->head);

                if (head) {
                        struct rcu_lfs_node *next = rcu_dereference(head->next);

                        /* Swing the top pointer past the popped node. */
                        if (uatomic_cmpxchg(&s->head, head, next) == head) {
                                rcu_read_unlock();
                                return head;
                        } else {
                                /* Concurrent modification. Retry. */
                                continue;
                        }
                } else {
                        /* Empty stack */
                        rcu_read_unlock();
                        return NULL;
                }
        }
}


-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

_______________________________________________
ltt-dev mailing list
[email protected]
http://lists.casi.polymtl.ca/cgi-bin/mailman/listinfo/ltt-dev

Reply via email to