To enable use of dlock-list within an interrupt handler, a new
irqsafe mode can now be specified at dlock-list allocation time as
an additional argument to alloc_dlock_list_heads(). With that mode
specified, the spin_lock_irqsave()/spin_unlock_irqrestore() pair is
used instead of the regular spin_lock()/spin_unlock() calls, making
it safe to manipulate the lists from interrupt context.
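
A minimal usage sketch of the new mode (my_heads, my_node, my_init()
and my_irq_handler() are hypothetical names for illustration only):

    #include <linux/dlock-list.h>
    #include <linux/interrupt.h>

    static struct dlock_list_heads my_heads;
    static struct dlock_list_node my_node;

    static int __init my_init(void)
    {
            int err;

            /*
             * Passing true selects irqsafe mode: each per-head lock
             * is then taken with spin_lock_irqsave() and released
             * with spin_unlock_irqrestore().
             */
            err = alloc_dlock_list_heads(&my_heads, true);
            if (err)
                    return err;

            init_dlock_list_node(&my_node);
            return 0;
    }

    /* In irqsafe mode, list insertion is safe in interrupt context. */
    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            dlock_lists_add(&my_node, &my_heads);
            return IRQ_HANDLED;
    }

The iteration helpers dlock_list_unlock()/dlock_list_relock() in the
patch below pick the correct lock variant automatically based on the
irqsafe flag stored in each list head.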

Signed-off-by: Waiman Long <long...@redhat.com>
---
 fs/super.c                 |  2 +-
 include/linux/dlock-list.h | 22 +++++++++++++++++-----
 lib/dlock-list.c           | 46 ++++++++++++++++++++++++++++++++--------------
 3 files changed, 50 insertions(+), 20 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index a90a070..0840e54 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -214,7 +214,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
        INIT_LIST_HEAD(&s->s_inodes_wb);
        spin_lock_init(&s->s_inode_wblist_lock);
 
-       if (alloc_dlock_list_heads(&s->s_inodes))
+       if (alloc_dlock_list_heads(&s->s_inodes, false))
                goto fail;
        if (list_lru_init_memcg(&s->s_dentry_lru))
                goto fail;
diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index b374101..fcb2b71 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -32,6 +32,8 @@
 struct dlock_list_head {
        struct list_head list;
        spinlock_t lock;
+       int irqsafe;    /* IRQ safe mode */
+       unsigned long flags;
 } ____cacheline_aligned_in_smp;
 
 struct dlock_list_heads {
@@ -89,7 +91,12 @@ static inline void init_dlock_list_node(struct dlock_list_node *node)
  */
 static inline void dlock_list_unlock(struct dlock_list_iter *iter)
 {
-       spin_unlock(&iter->entry->lock);
+       struct dlock_list_head *h = iter->entry;
+
+       if (h->irqsafe)
+               spin_unlock_irqrestore(&h->lock, h->flags);
+       else
+               spin_unlock(&h->lock);
 }
 
 /**
@@ -98,14 +105,19 @@ static inline void dlock_list_unlock(struct dlock_list_iter *iter)
  */
 static inline void dlock_list_relock(struct dlock_list_iter *iter)
 {
-       spin_lock(&iter->entry->lock);
+       struct dlock_list_head *h = iter->entry;
+
+       if (h->irqsafe)
+               spin_lock_irqsave(&h->lock, h->flags);
+       else
+               spin_lock(&h->lock);
 }
 
 /*
  * Allocation and freeing of dlock list
  */
 extern int  __alloc_dlock_list_heads(struct dlock_list_heads *dlist,
-                                    struct lock_class_key *key);
+                                    int irqsafe, struct lock_class_key *key);
 extern void free_dlock_list_heads(struct dlock_list_heads *dlist);
 
 /**
@@ -113,10 +125,10 @@ extern int  __alloc_dlock_list_heads(struct dlock_list_heads *dlist,
  * @dlist  : Pointer to the dlock_list_heads structure to be initialized
  * Return  : 0 if successful, -ENOMEM if memory allocation error
  */
-#define alloc_dlock_list_heads(dlist)                                  \
+#define alloc_dlock_list_heads(dlist, irqsafe)                         \
 ({                                                                     \
        static struct lock_class_key _key;                              \
-       __alloc_dlock_list_heads(dlist, &_key);                         \
+       __alloc_dlock_list_heads(dlist, irqsafe, &_key);                \
 })
 
 /*
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index f3667aa..c1197f5 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -91,8 +91,9 @@ static int __init cpu2idx_init(void)
 
 /**
  * __alloc_dlock_list_heads - Initialize and allocate the list of head entries
- * @dlist: Pointer to the dlock_list_heads structure to be initialized
- * @key  : The lock class key to be used for lockdep
+ * @dlist  : Pointer to the dlock_list_heads structure to be initialized
+ * @irqsafe: IRQ safe mode flag
+ * @key    : The lock class key to be used for lockdep
  * Return: 0 if successful, -ENOMEM if memory allocation error
  *
  * This function does not allocate the dlock_list_heads structure itself. The
@@ -108,7 +109,7 @@ static int __init cpu2idx_init(void)
  * The extra lists will not be ever used as all the cpu2idx entries will be
  * 0 before initialization.
  */
-int __alloc_dlock_list_heads(struct dlock_list_heads *dlist,
+int __alloc_dlock_list_heads(struct dlock_list_heads *dlist, int irqsafe,
                             struct lock_class_key *key)
 {
        int idx, cnt = nr_dlock_lists ? nr_dlock_lists : nr_cpu_ids;
@@ -123,6 +124,7 @@ int __alloc_dlock_list_heads(struct dlock_list_heads *dlist,
 
                INIT_LIST_HEAD(&head->list);
                head->lock = __SPIN_LOCK_UNLOCKED(&head->lock);
+               head->irqsafe = irqsafe;
                lockdep_set_class(&head->lock, key);
        }
        return 0;
@@ -198,13 +200,19 @@ struct dlock_list_head *dlock_list_hash(struct dlock_list_heads *dlist,
 void dlock_list_add(struct dlock_list_node *node,
                    struct dlock_list_head *head)
 {
-       /*
-        * There is no need to disable preemption
-        */
-       spin_lock(&head->lock);
-       node->head = head;
-       list_add(&node->list, &head->list);
-       spin_unlock(&head->lock);
+       unsigned long flags;
+
+       if (head->irqsafe) {
+               spin_lock_irqsave(&head->lock, flags);
+               node->head = head;
+               list_add(&node->list, &head->list);
+               spin_unlock_irqrestore(&head->lock, flags);
+       } else {
+               spin_lock(&head->lock);
+               node->head = head;
+               list_add(&node->list, &head->list);
+               spin_unlock(&head->lock);
+       }
 }
 EXPORT_SYMBOL(dlock_list_add);
 
@@ -238,6 +246,7 @@ void dlock_lists_add(struct dlock_list_node *node,
 void dlock_lists_del(struct dlock_list_node *node)
 {
        struct dlock_list_head *head;
+       unsigned long flags = 0;
        bool retry;
 
        do {
@@ -246,7 +255,11 @@ void dlock_lists_del(struct dlock_list_node *node)
                              __func__, (unsigned long)node))
                        return;
 
-               spin_lock(&head->lock);
+               if (head->irqsafe)
+                       spin_lock_irqsave(&head->lock, flags);
+               else
+                       spin_lock(&head->lock);
+
                if (likely(head == node->head)) {
                        list_del_init(&node->list);
                        node->head = NULL;
@@ -259,7 +272,11 @@ void dlock_lists_del(struct dlock_list_node *node)
                         */
                        retry = (node->head != NULL);
                }
-               spin_unlock(&head->lock);
+
+               if (head->irqsafe)
+                       spin_unlock_irqrestore(&head->lock, flags);
+               else
+                       spin_unlock(&head->lock);
        } while (retry);
 }
 EXPORT_SYMBOL(dlock_lists_del);
@@ -282,7 +299,7 @@ struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)
 
 restart:
        if (iter->entry) {
-               spin_unlock(&iter->entry->lock);
+               dlock_list_unlock(iter);
                iter->entry = NULL;
        }
 
@@ -297,7 +314,8 @@ struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)
                goto next_list;
 
        head = iter->entry = &iter->head[iter->index];
-       spin_lock(&head->lock);
+       dlock_list_relock(iter);
+
        /*
         * There is a slight chance that the list may become empty just
         * before the lock is acquired. So an additional check is
-- 
1.8.3.1