With the new refcounting, pte entries can point to tail pages. It doesn't
make much sense to mark a tail page locked -- we need to protect the whole
compound page.

This patch adjusts the helpers related to PG_locked to operate on the head page.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 include/linux/page-flags.h |  3 ++-
 include/linux/pagemap.h    |  5 +++++
 mm/filemap.c               | 11 +++++++----
 mm/slub.c                  |  2 ++
 4 files changed, 16 insertions(+), 5 deletions(-)
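
For illustration, a minimal sketch of the resulting behaviour (the helper
name lock_whole_compound() is hypothetical and not part of this patch):
after these changes, locking through any subpage of a compound page
serializes on PG_locked of the head.

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical example: 'page' may be a head or a tail page. */
	static void lock_whole_compound(struct page *page)
	{
		/* compound_head() is a no-op for non-compound pages */
		struct page *head = compound_head(page);

		/* trylock_page()/__lock_page() redirect to the head */
		lock_page(page);

		/* PageLocked() now tests the head page's flags */
		VM_BUG_ON_PAGE(!PageLocked(head), page);

		/* ... the whole compound page is protected here ... */

		/* unlock_page() redirects to the head as well */
		unlock_page(page);
	}

Note that __set_page_locked()/__clear_page_locked() and
slab_lock()/slab_unlock() instead gain a VM_BUG_ON_PAGE(PageTail(page)):
their callers are expected to pass the head page directly.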

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c851ff92d5b3..58b98bced299 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -207,7 +207,8 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; }
 
 struct page;   /* forward declaration */
 
-TESTPAGEFLAG(Locked, locked)
+#define PageLocked(page) test_bit(PG_locked, &compound_head(page)->flags)
+
 PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
        __SETPAGEFLAG(Referenced, referenced)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..ad6da4e49555 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -428,16 +428,19 @@ extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        __set_bit(PG_locked, &page->flags);
 }
 
 static inline void __clear_page_locked(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        __clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
 {
+       page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
@@ -490,6 +493,7 @@ extern int wait_on_page_bit_killable_timeout(struct page *page,
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
+       page = compound_head(page);
        if (PageLocked(page))
                return wait_on_page_bit_killable(page, PG_locked);
        return 0;
@@ -510,6 +514,7 @@ static inline void wake_up_page(struct page *page, int bit)
  */
 static inline void wait_on_page_locked(struct page *page)
 {
+       page = compound_head(page);
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
 }
diff --git a/mm/filemap.c b/mm/filemap.c
index 434dba317400..a0c3a57d29c9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -743,6 +743,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
+       page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        clear_bit_unlock(PG_locked, &page->flags);
        smp_mb__after_atomic();
@@ -807,18 +808,20 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_page(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
+       __wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+       return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
                                        bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
diff --git a/mm/slub.c b/mm/slub.c
index 6832c4eab104..37205f648294 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -338,11 +338,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
-- 
2.1.4
