Make the functions that test page bits return bool rather than int.  This
guarantees that the value is 0 or 1 when it is used in arithmetic, rather
than relying on test_bit() and friends to return 0 or 1 (though they
probably should).
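
As an illustration (hypothetical helpers, not code from this patch): an
int return built straight from the raw bit mask can leak a non-0/1 value
into arithmetic, whereas a bool return is normalised to 0 or 1 by the C
_Bool conversion rules:

	/* Hypothetical helpers, for illustration only. */
	static inline int flag_test_int(unsigned long flags)
	{
		return flags & 0x4;		/* may yield 0 or 4 */
	}

	static inline bool flag_test_bool(unsigned long flags)
	{
		return flags & 0x4;		/* converted to bool: 0 or 1 */
	}

	/*
	 * nr += flag_test_int(flags);	could add 4
	 * nr += flag_test_bool(flags);	always adds 0 or 1
	 */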

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Linus Torvalds <torva...@linux-foundation.org>
cc: Matthew Wilcox <wi...@infradead.org>
cc: Andrew Morton <a...@linux-foundation.org>
cc: linux...@kvack.org
cc: linux-fsde...@vger.kernel.org
---

 include/linux/page-flags.h |   50 ++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
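
For reference (illustration only, not part of the change to be applied):
after this patch a test helper generated by TESTPAGEFLAG(), e.g. for a
flag declared as PAGEFLAG(Dirty, dirty, PF_HEAD), expands to roughly:

	static __always_inline bool PageDirty(struct page *page)
	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }

so an expression such as "nr_dirty += PageDirty(page)" can only ever add
0 or 1.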

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 04a34c08e0a6..4ff7de61b13d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -188,18 +188,18 @@ static inline struct page *compound_head(struct page *page)
        return page;
 }
 
-static __always_inline int PageTail(struct page *page)
+static __always_inline bool PageTail(struct page *page)
 {
        return READ_ONCE(page->compound_head) & 1;
 }
 
-static __always_inline int PageCompound(struct page *page)
+static __always_inline bool PageCompound(struct page *page)
 {
        return test_bit(PG_head, &page->flags) || PageTail(page);
 }
 
 #define        PAGE_POISON_PATTERN     -1l
-static inline int PagePoisoned(const struct page *page)
+static inline bool PagePoisoned(const struct page *page)
 {
        return page->flags == PAGE_POISON_PATTERN;
 }
@@ -260,7 +260,7 @@ static inline void page_init_poison(struct page *page, size_t size)
  * Macros to create function definitions for page flags
  */
 #define TESTPAGEFLAG(uname, lname, policy)                             \
-static __always_inline int Page##uname(struct page *page)              \
+static __always_inline bool Page##uname(struct page *page)             \
        { return test_bit(PG_##lname, &policy(page, 0)->flags); }
 
 #define SETPAGEFLAG(uname, lname, policy)                              \
@@ -280,11 +280,11 @@ static __always_inline void __ClearPage##uname(struct page *page) \
        { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define TESTSETFLAG(uname, lname, policy)                              \
-static __always_inline int TestSetPage##uname(struct page *page)       \
+static __always_inline bool TestSetPage##uname(struct page *page)      \
        { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define TESTCLEARFLAG(uname, lname, policy)                            \
-static __always_inline int TestClearPage##uname(struct page *page)     \
+static __always_inline bool TestClearPage##uname(struct page *page)    \
        { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define PAGEFLAG(uname, lname, policy)                                 \
@@ -302,7 +302,7 @@ static __always_inline int TestClearPage##uname(struct page *page)  \
        TESTCLEARFLAG(uname, lname, policy)
 
 #define TESTPAGEFLAG_FALSE(uname)                                      \
-static inline int Page##uname(const struct page *page) { return 0; }
+static inline bool Page##uname(const struct page *page) { return false; }
 
 #define SETPAGEFLAG_NOOP(uname)                                                \
 static inline void SetPage##uname(struct page *page) {  }
@@ -314,10 +314,10 @@ static inline void ClearPage##uname(struct page *page) {  }
 static inline void __ClearPage##uname(struct page *page) {  }
 
 #define TESTSETFLAG_FALSE(uname)                                       \
-static inline int TestSetPage##uname(struct page *page) { return 0; }
+static inline bool TestSetPage##uname(struct page *page) { return false; }
 
 #define TESTCLEARFLAG_FALSE(uname)                                     \
-static inline int TestClearPage##uname(struct page *page) { return 0; }
+static inline bool TestClearPage##uname(struct page *page) { return false; }
 
 #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                        \
        SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
@@ -393,7 +393,7 @@ PAGEFLAG_FALSE(HighMem)
 #endif
 
 #ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool PageSwapCache(struct page *page)
 {
 #ifdef CONFIG_THP_SWAP
        page = compound_head(page);
@@ -473,18 +473,18 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
 #define PAGE_MAPPING_KSM       (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 #define PAGE_MAPPING_FLAGS     (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 
-static __always_inline int PageMappingFlags(struct page *page)
+static __always_inline bool PageMappingFlags(struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
 }
 
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool PageAnon(struct page *page)
 {
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 }
 
-static __always_inline int __PageMovable(struct page *page)
+static __always_inline bool __PageMovable(struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
@@ -497,7 +497,7 @@ static __always_inline int __PageMovable(struct page *page)
  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
  * anon_vma, but to that page's node of the stable tree.
  */
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool PageKsm(struct page *page)
 {
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -509,9 +509,9 @@ TESTPAGEFLAG_FALSE(Ksm)
 
 u64 stable_page_flags(struct page *page);
 
-static inline int PageUptodate(struct page *page)
+static inline bool PageUptodate(struct page *page)
 {
-       int ret;
+       bool ret;
        page = compound_head(page);
        ret = test_bit(PG_uptodate, &(page)->flags);
        /*
@@ -607,7 +607,7 @@ TESTPAGEFLAG_FALSE(HeadHuge)
  * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
  * called only in the core VM paths where hugetlbfs pages can't exist.
  */
-static inline int PageTransHuge(struct page *page)
+static inline bool PageTransHuge(struct page *page)
 {
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
@@ -618,7 +618,7 @@ static inline int PageTransHuge(struct page *page)
  * and hugetlbfs pages, so it should only be called when it's known
  * that hugetlbfs pages aren't involved.
  */
-static inline int PageTransCompound(struct page *page)
+static inline bool PageTransCompound(struct page *page)
 {
        return PageCompound(page);
 }
@@ -644,12 +644,12 @@ static inline int PageTransCompound(struct page *page)
  * mapped in the current process so comparing subpage's _mapcount to
  * compound_mapcount to filter out PTE mapped case.
  */
-static inline int PageTransCompoundMap(struct page *page)
+static inline bool PageTransCompoundMap(struct page *page)
 {
        struct page *head;
 
        if (!PageTransCompound(page))
-               return 0;
+               return false;
 
        if (PageAnon(page))
                return atomic_read(&page->_mapcount) < 0;
@@ -665,7 +665,7 @@ static inline int PageTransCompoundMap(struct page *page)
  * and hugetlbfs pages, so it should only be called when it's known
  * that hugetlbfs pages aren't involved.
  */
-static inline int PageTransTail(struct page *page)
+static inline bool PageTransTail(struct page *page)
 {
        return PageTail(page);
 }
@@ -714,13 +714,13 @@ PAGEFLAG_FALSE(DoubleMap)
 #define PageType(page, flag)                                           \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
 
-static inline int page_has_type(struct page *page)
+static inline bool page_has_type(struct page *page)
 {
        return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
 }
 
 #define PAGE_TYPE_OPS(uname, lname)                                    \
-static __always_inline int Page##uname(struct page *page)              \
+static __always_inline bool Page##uname(struct page *page)             \
 {                                                                      \
        return PageType(page, PG_##lname);                              \
 }                                                                      \
@@ -778,7 +778,7 @@ __PAGEFLAG(Isolated, isolated, PF_ANY);
  * If network-based swap is enabled, sl*b must keep track of whether pages
  * were allocated from pfmemalloc reserves.
  */
-static inline int PageSlabPfmemalloc(struct page *page)
+static inline bool PageSlabPfmemalloc(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
@@ -839,7 +839,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
  * Determine if a page has private stuff, indicating that release routines
  * should be invoked upon it.
  */
-static inline int page_has_private(struct page *page)
+static inline bool page_has_private(struct page *page)
 {
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
 }

