[PATCH v2 1/4] lockdep: Apply crossrelease to PG_locked locks

2017-12-03 Thread Byungchul Park
Although lock_page() and its family can cause deadlock, lockdep has not
worked with them, because unlock_page() might be called in a different
context from the acquire context, which violates lockdep's assumption.

Now that CONFIG_LOCKDEP_CROSSRELEASE has been introduced, lockdep can
work with page locks.
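
For illustration only, a minimal sketch of the pattern that breaks the
old assumption (hypothetical helpers; submit_async_read() stands in for
any asynchronous I/O submission path such as a buffered read):

    /* Process context: acquire PG_locked, return with it still held. */
    void reader(struct page *page)
    {
            lock_page(page);
            submit_async_read(page);        /* hypothetical async submit */
    }

    /* I/O completion, possibly in interrupt context: release it here. */
    void read_done(struct page *page)
    {
            SetPageUptodate(page);
            unlock_page(page);              /* different context than lock_page() */
    }

Vanilla lockdep requires a lock to be released in the context that
acquired it, so it had to ignore PG_locked; crossrelease lifts that
restriction.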

Signed-off-by: Byungchul Park 
---
 include/linux/mm_types.h |   8 
 include/linux/pagemap.h  | 101 ---
 lib/Kconfig.debug        |   7 
 mm/filemap.c             |   4 +-
 mm/page_alloc.c          |   3 ++
 5 files changed, 115 insertions(+), 8 deletions(-)
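
As the comment added in lock_page() below explains, crossrelease orders
a crosslock against later locks with a global generation counter. A
simplified sketch of that rule (illustrative only, in userspace C11;
the real logic lives in the crossrelease parts of
kernel/locking/lockdep.c):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int cross_gen_id;     /* global generation counter */

    /* Crosslock A: bump the generation at acquire time (X). */
    static int acquire_cross(void)
    {
            return atomic_fetch_add(&cross_gen_id, 1) + 1;
    }

    /* Ordinary lock B: sample the generation at acquire time (Y). */
    static int acquire_normal(void)
    {
            return atomic_load(&cross_gen_id);
    }

    /* 'lock A and then lock B' may be assumed globally if X <= Y. */
    static bool a_then_b(int x, int y)
    {
            return x <= y;
    }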

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c85f11d..263b861 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -17,6 +17,10 @@
 
 #include <asm/mmu.h>
 
+#ifdef CONFIG_LOCKDEP_PAGELOCK
+#include <linux/lockdep.h>
+#endif
+
 #ifndef AT_VECTOR_SIZE_ARCH
 #define AT_VECTOR_SIZE_ARCH 0
 #endif
@@ -218,6 +222,10 @@ struct page {
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
 #endif
+
+#ifdef CONFIG_LOCKDEP_PAGELOCK
+   struct lockdep_map_cross map;
+#endif
 }
 /*
  * The struct page can be forced to be double word aligned so that atomic ops
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e08b533..35b4f67 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -15,6 +15,9 @@
 #include <linux/bitops.h>
 #include <linux/hardirq.h> /* for in_interrupt() */
 #include <linux/hugetlb_inline.h>
+#ifdef CONFIG_LOCKDEP_PAGELOCK
+#include <linux/lockdep.h>
+#endif
 
 /*
  * Bits in mapping->flags.
@@ -457,26 +460,91 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
 }
 
+#ifdef CONFIG_LOCKDEP_PAGELOCK
+#define lock_page_init(p)  \
+do {   \
+   static struct lock_class_key __key; \
+   lockdep_init_map_crosslock((struct lockdep_map *)&(p)->map, \
+   "(PG_locked)" #p, &__key, 0);   \
+} while (0)
+
+static inline void lock_page_acquire(struct page *page, int try)
+{
+   page = compound_head(page);
+   lock_acquire_exclusive((struct lockdep_map *)&page->map, 0,
+                          try, NULL, _RET_IP_);
+}
+
+static inline void lock_page_release(struct page *page)
+{
+   page = compound_head(page);
+   /*
+    * lock_commit_crosslock() is necessary for crosslocks.
+    */
+   lock_commit_crosslock((struct lockdep_map *)&page->map);
+   lock_release((struct lockdep_map *)&page->map, 0, _RET_IP_);
+}
+#else
+static inline void lock_page_init(struct page *page) {}
+static inline void lock_page_free(struct page *page) {}
+static inline void lock_page_acquire(struct page *page, int try) {}
+static inline void lock_page_release(struct page *page) {}
+#endif
+
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
-extern void unlock_page(struct page *page);
+extern void do_raw_unlock_page(struct page *page);
 
-static inline int trylock_page(struct page *page)
+static inline void unlock_page(struct page *page)
+{
+   lock_page_release(page);
+   do_raw_unlock_page(page);
+}
+
+static inline int do_raw_trylock_page(struct page *page)
 {
page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
+static inline int trylock_page(struct page *page)
+{
+   if (do_raw_trylock_page(page)) {
+   lock_page_acquire(page, 1);
+   return 1;
+   }
+   return 0;
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
might_sleep();
-   if (!trylock_page(page))
+
+   if (!do_raw_trylock_page(page))
__lock_page(page);
+   /*
+    * acquire() must be after actual lock operation for crosslocks.
+    * This way a crosslock and current lock can be ordered like:
+    *
+    *  CONTEXT 1                   CONTEXT 2
+    *  ---------                   ---------
+    *  lock A (cross)
+    *  acquire A
+    *    X = atomic_inc_return(&cross_gen_id)
+    *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    *                              acquire B
+    *                                Y = atomic_read_acquire(&cross_gen_id)
+    *                              lock B
+    *
+    * so that 'lock A and then lock B' can be seen globally,
+    * if X <= Y.
+    */
+   lock_page_acquire(page, 0);
 }
 
 /*
@@ -486,9 +554,20 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
+   int ret;
+
might_sleep();
-   if (!trylock_page(page))
-           return __lock_page_killable(page);
+
+   if (!do_raw_trylock_page(page)) {
+           ret = __lock_page_killable(page);
+           if (ret)
+                   return ret;
+   }
+
+   lock_page_acquire(page, 0);
+   return 0;
 }