On 15/05/2017 11:07, Davidlohr Bueso wrote:
> --- /dev/null
> +++ b/include/linux/range_lock.h
> @@ -0,0 +1,181 @@
> +/*
> + * Range/interval rw-locking
> + * -------------------------
> + *
> + * Interval-tree based range locking is about controlling tasks' forward
> + * progress when adding an arbitrary interval (node) to the tree, depending
> + * on any overlapping ranges. A task can only continue (wakeup) if there are
> + * no intersecting ranges, thus achieving mutual exclusion. To this end, a
> + * reference counter is kept for each intersecting range in the tree
> + * (_before_ adding itself to it). To enable shared locking semantics,
> + * the reader to-be-locked will not take a reference if an intersecting node
> + * is also a reader, therefore ignoring that node altogether.
> + *
> + * Fairness and freedom from starvation are guaranteed by the lack of lock
> + * stealing, thus range locks depend directly on interval tree semantics.
> + * This is particularly important for iterations, where the key for the
> + * rbtree is given by the interval's low endpoint, and duplicates are
> + * walked as in an inorder traversal of the tree.
> + *
> + * The cost of locking and unlocking a range is O((1+R_int)log(R_all)), where
> + * R_all is the total number of ranges in the tree and R_int is the number
> + * of ranges intersecting the range being operated on.
> + */
> +#ifndef _LINUX_RANGE_LOCK_H
> +#define _LINUX_RANGE_LOCK_H
> +
> +#include <linux/rbtree.h>
> +#include <linux/interval_tree.h>
> +#include <linux/list.h>
> +#include <linux/spinlock.h>
> +
> +/*
> + * The largest range will span [0,RANGE_LOCK_FULL].
> + */
> +#define RANGE_LOCK_FULL  ~0UL
> +
> +struct range_lock {
> +     struct interval_tree_node node;
> +     struct task_struct *tsk;
> +     /* Number of ranges which are blocking acquisition of the lock */
> +     unsigned int blocking_ranges;
> +     u64 seqnum;
> +};
> +
> +struct range_lock_tree {
> +     struct rb_root root;
> +     spinlock_t lock;
> +     struct interval_tree_node *leftmost; /* compute smallest 'start' */
> +     u64 seqnum; /* track order of incoming ranges, avoid overflows */
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +     struct lockdep_map dep_map;
> +#endif
> +};
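
Just to check I'm reading the header comment right: the blocking_ranges
accounting amounts to something like the sketch below. This is only my
illustration, not code from this patch, and range_lock_count_blockers()
is a made-up name; the real lock path also has to handle the
reader-sharing case, the seqnum ordering and the wakeups, and it must
run under tree->lock:

static void range_lock_count_blockers(struct range_lock_tree *tree,
				      struct range_lock *lock)
{
	struct interval_tree_node *node;

	lock->blocking_ranges = 0;

	/* Count every range already in the tree that overlaps ours... */
	for (node = interval_tree_iter_first(&tree->root, lock->node.start,
					     lock->node.last);
	     node;
	     node = interval_tree_iter_next(node, lock->node.start,
					    lock->node.last))
		lock->blocking_ranges++;

	/* ...and only then make ourselves visible to later lockers. */
	interval_tree_insert(&lock->node, &tree->root);
}
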
> +
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +# define __RANGE_LOCK_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
> +#else
> +# define __RANGE_LOCK_DEP_MAP_INIT(lockname)
> +#endif
> +
> +#define __RANGE_LOCK_TREE_INITIALIZER(name)          \
> +     { .leftmost = NULL                              \
> +     , .root = RB_ROOT                               \
> +     , .seqnum = 0                                   \
> +     , .lock = __SPIN_LOCK_UNLOCKED(name.lock)       \
> +     __RANGE_LOCK_DEP_MAP_INIT(name) }               \
> +
> +#define DEFINE_RANGE_LOCK_TREE(name) \
> +     struct range_lock_tree name = __RANGE_LOCK_TREE_INITIALIZER(name)
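
FWIW, this is how I would expect a caller to use the tree macro, just as
an illustration (range_lock_init(), range_write_lock() and
range_write_unlock() are what I assume the rest of the patch provides;
treat the names as placeholders):

static DEFINE_RANGE_LOCK_TREE(test_tree);

static void test_exclusive(unsigned long start, unsigned long last)
{
	struct range_lock rl;

	range_lock_init(&rl, start, last);

	/* Blocks until no overlapping range remains in the tree. */
	range_write_lock(&test_tree, &rl);

	/* ... exclusive access to [start, last] ... */

	range_write_unlock(&test_tree, &rl);
}
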
> +
> +#define __RANGE_LOCK_INITIALIZER(__start, __last) {  \
> +             .node = {                               \
> +                     .start = (__start)              \
> +                     ,.last = (__last)               \
> +             }                                       \
> +             , .task = NULL                          \
                   ^ should be .tsk; the field in struct range_lock is named 'tsk', so this doesn't build.

> +             , .blocking_ranges = 0                  \
> +             , .reader = false                       \
                   ^ this field doesn't exist anymore in struct range_lock, so this won't build either.
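
With both of the above fixed, I'd expect the initializer to end up
looking roughly like this (untested, and only covering the fields quoted
here; I haven't checked whether seqnum also wants an explicit
initializer):

#define __RANGE_LOCK_INITIALIZER(__start, __last) {	\
		.node = {				\
			.start = (__start)		\
			,.last = (__last)		\
		}					\
		, .tsk = NULL				\
		, .blocking_ranges = 0			\
	}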

Cheers,
Laurent.
