On Mon, May 15, 2017 at 02:07:21AM -0700, Davidlohr Bueso wrote:
> +static inline int wait_for_ranges(struct range_lock_tree *tree,
> +                               struct range_lock *lock, long state)
> +{
> +     int ret = 0;
> +
> +     while (true) {
> +             set_current_state(state);
> +
> +             /* do we need to go to sleep? */
> +             if (!lock->blocking_ranges)
> +                     break;
> +
> +             if (unlikely(signal_pending_state(state, current))) {
> +                     struct interval_tree_node *node;
> +                     unsigned long flags;
> +                     DEFINE_WAKE_Q(wake_q);
> +
> +                     ret = -EINTR;
> +                     /*
> +                      * We're not taking the lock after all, cleanup
> +                      * after ourselves.
> +                      */
> +                     spin_lock_irqsave(&tree->lock, flags);
> +
> +                     range_lock_clear_reader(lock);
> +                     __range_tree_remove(tree, lock);
> +
> +                     if (!__range_intersects_intree(tree, lock))
> +                             goto unlock;
> +
> +                     range_interval_tree_foreach(node, &tree->root,
> +                                                 lock->node.start,
> +                                                 lock->node.last) {
> +                             struct range_lock *blked;
> +                             blked = to_range_lock(node);
> +
> +                             if (range_lock_is_reader(lock) &&
> +                                 range_lock_is_reader(blked))
> +                                     continue;
> +
> +                             /* unaccount for threads _we_ are blocking */
> +                             if (lock->seqnum < blked->seqnum)
> +                                     range_lock_put(blked, &wake_q);
> +                     }
> +
> +             unlock:
> +                     spin_unlock_irqrestore(&tree->lock, flags);
> +                     wake_up_q(&wake_q);
> +                     break;
> +             }
> +
> +             schedule();
> +     }
> +
> +     __set_current_state(TASK_RUNNING);
> +     return ret;
> +}


> +void range_read_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +     struct interval_tree_node *node;
> +     unsigned long flags;
> +     DEFINE_WAKE_Q(wake_q);
> +
> +     spin_lock_irqsave(&tree->lock, flags);
> +
> +     range_lock_clear_reader(lock);
> +     __range_tree_remove(tree, lock);
> +
> +     range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +     if (!__range_intersects_intree(tree, lock)) {
> +             /* nobody to wakeup, we're done */
> +             spin_unlock_irqrestore(&tree->lock, flags);
> +             return;
> +     }
> +
> +     range_interval_tree_foreach(node, &tree->root,
> +                                 lock->node.start, lock->node.last) {
> +             struct range_lock *blocked_lock;
> +             blocked_lock = to_range_lock(node);
> +
> +             if (!range_lock_is_reader(blocked_lock))
> +                     range_lock_put(blocked_lock, &wake_q);
> +     }
> +
> +     spin_unlock_irqrestore(&tree->lock, flags);
> +     wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_read_unlock);

> +void range_write_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +     struct interval_tree_node *node;
> +     unsigned long flags;
> +     DEFINE_WAKE_Q(wake_q);
> +
> +     spin_lock_irqsave(&tree->lock, flags);
> +
> +     range_lock_clear_reader(lock);
> +     __range_tree_remove(tree, lock);
> +
> +     range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +     if (!__range_intersects_intree(tree, lock)) {
> +             /* nobody to wakeup, we're done */
> +             spin_unlock_irqrestore(&tree->lock, flags);
> +             return;
> +     }
> +
> +     range_interval_tree_foreach(node, &tree->root,
> +                                 lock->node.start, lock->node.last) {
> +             struct range_lock *blocked_lock;
> +             blocked_lock = to_range_lock(node);
> +
> +             range_lock_put(blocked_lock, &wake_q);
> +     }
> +
> +     spin_unlock_irqrestore(&tree->lock, flags);
> +     wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_write_unlock);


There is significant duplication here. Can't we have a
__range_unlock_common() and use it in all three places (both unlock
paths and the -EINTR cleanup in wait_for_ranges())?
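
Completely untested sketch of the sort of thing I mean; the wake_readers
flag is just one way of folding in the reader/writer difference, shape it
however you like:

static void __range_unlock_common(struct range_lock_tree *tree,
				  struct range_lock *lock, bool wake_readers)
{
	struct interval_tree_node *node;
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	spin_lock_irqsave(&tree->lock, flags);

	range_lock_clear_reader(lock);
	__range_tree_remove(tree, lock);

	range_lock_release(&tree->dep_map, 1, _RET_IP_);

	if (!__range_intersects_intree(tree, lock)) {
		/* nobody to wakeup, we're done */
		spin_unlock_irqrestore(&tree->lock, flags);
		return;
	}

	range_interval_tree_foreach(node, &tree->root,
				    lock->node.start, lock->node.last) {
		struct range_lock *blocked_lock = to_range_lock(node);

		/* read_unlock only wakes writers, write_unlock wakes everybody */
		if (wake_readers || !range_lock_is_reader(blocked_lock))
			range_lock_put(blocked_lock, &wake_q);
	}

	spin_unlock_irqrestore(&tree->lock, flags);
	wake_up_q(&wake_q);
}

void range_read_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	__range_unlock_common(tree, lock, false);
}
EXPORT_SYMBOL_GPL(range_read_unlock);

void range_write_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	__range_unlock_common(tree, lock, true);
}
EXPORT_SYMBOL_GPL(range_write_unlock);

The -EINTR cleanup in wait_for_ranges() additionally filters on seqnum and
reader-vs-reader, so that third caller either keeps its own loop or the
helper grows a little more parameterization.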

