Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-15 Thread Joonsoo Kim
On Fri, Aug 12, 2016 at 09:25:37PM +0900, Sergey Senozhatsky wrote:
> On (08/11/16 11:41), Vlastimil Babka wrote:
> > On 08/10/2016 10:14 AM, Sergey Senozhatsky wrote:
> > > > @@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
> > > >         size >>= 1;
> > > >         VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
> > > > 
> > > > -       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
> > > > -           debug_guardpage_enabled() &&
> > > > -           high < debug_guardpage_minorder()) {
> > > > -               /*
> > > > -                * Mark as guard pages (or page), that will allow to
> > > > -                * merge back to allocator when buddy will be freed.
> > > > -                * Corresponding page table entries will not be touched,
> > > > -                * pages will stay not present in virtual address space
> > > > -                */
> > > > -               set_page_guard(zone, &page[size], high, migratetype);
> > > > +       /*
> > > > +        * Mark as guard pages (or page), that will allow to
> > > > +        * merge back to allocator when buddy will be freed.
> > > > +        * Corresponding page table entries will not be touched,
> > > > +        * pages will stay not present in virtual address space
> > > > +        */
> > > > +       if (set_page_guard(zone, &page[size], high, migratetype))
> > > >                 continue;
> > > > -       }
> > > 
> > > so previously IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) could have optimized out
> > > the entire branch -- no set_page_guard() invocation and checks, right? but
> > > now we would call set_page_guard() every time?
> > 
> > No, there's a !CONFIG_DEBUG_PAGEALLOC version of set_page_guard() that
> > returns false (static inline), so this whole if will be eliminated by the
> > compiler, same as before.
> 
> ah, indeed. didn't notice it.

Hello, Sergey and Vlastimil.

I've addressed all of your comments and sent a v2.

Thanks.


Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-12 Thread Sergey Senozhatsky
On (08/11/16 11:41), Vlastimil Babka wrote:
> On 08/10/2016 10:14 AM, Sergey Senozhatsky wrote:
> > > @@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
> > >         size >>= 1;
> > >         VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
> > > 
> > > -       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
> > > -           debug_guardpage_enabled() &&
> > > -           high < debug_guardpage_minorder()) {
> > > -               /*
> > > -                * Mark as guard pages (or page), that will allow to
> > > -                * merge back to allocator when buddy will be freed.
> > > -                * Corresponding page table entries will not be touched,
> > > -                * pages will stay not present in virtual address space
> > > -                */
> > > -               set_page_guard(zone, &page[size], high, migratetype);
> > > +       /*
> > > +        * Mark as guard pages (or page), that will allow to
> > > +        * merge back to allocator when buddy will be freed.
> > > +        * Corresponding page table entries will not be touched,
> > > +        * pages will stay not present in virtual address space
> > > +        */
> > > +       if (set_page_guard(zone, &page[size], high, migratetype))
> > >                 continue;
> > > -       }
> > 
> > so previously IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) could have optimized out
> > the entire branch -- no set_page_guard() invocation and checks, right? but
> > now we would call set_page_guard() every time?
> 
> No, there's a !CONFIG_DEBUG_PAGEALLOC version of set_page_guard() that
> returns false (static inline), so this whole if will be eliminated by the
> compiler, same as before.

ah, indeed. didn't notice it.

-ss


Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-11 Thread Vlastimil Babka

On 08/10/2016 10:14 AM, Sergey Senozhatsky wrote:

@@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

-               if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
-                       debug_guardpage_enabled() &&
-                       high < debug_guardpage_minorder()) {
-                       /*
-                        * Mark as guard pages (or page), that will allow to
-                        * merge back to allocator when buddy will be freed.
-                        * Corresponding page table entries will not be touched,
-                        * pages will stay not present in virtual address space
-                        */
-                       set_page_guard(zone, &page[size], high, migratetype);
+               /*
+                * Mark as guard pages (or page), that will allow to
+                * merge back to allocator when buddy will be freed.
+                * Corresponding page table entries will not be touched,
+                * pages will stay not present in virtual address space
+                */
+               if (set_page_guard(zone, &page[size], high, migratetype))
                        continue;
-               }


so previously IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) could have optimized out
the entire branch -- no set_page_guard() invocation and checks, right? but
now we would call set_page_guard() every time?


No, there's a !CONFIG_DEBUG_PAGEALLOC version of set_page_guard() that 
returns false (static inline), so this whole if will be eliminated by 
the compiler, same as before.
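
To see why, here is a minimal sketch of the pattern (hypothetical names, not the actual kernel code): because the !CONFIG_DEBUG_PAGEALLOC stub is a static inline returning a constant false, the compiler inlines it, proves the condition can never hold, and removes the branch as dead code.

#include <stdbool.h>

#ifdef CONFIG_DEBUG_PAGEALLOC
bool set_guard(unsigned int order);     /* real version: may return true */
#else
/* Constant-false stub: after inlining, if (set_guard(order))
 * becomes if (false) and is discarded as dead code. */
static inline bool set_guard(unsigned int order) { return false; }
#endif

void expand_step(unsigned int order)
{
        if (set_guard(order))   /* folds away when the stub is used */
                return;
        /* ... normal free-list handling ... */
}

So with the debug config off, the generated code costs the same as with the old explicit IS_ENABLED() test: nothing.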




Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-11 Thread Vlastimil Babka

On 08/10/2016 08:16 AM, js1...@gmail.com wrote:

From: Joonsoo Kim 

We can clean up the code by moving the decision conditions for
set_page_guard() into set_page_guard() itself. This improves
readability. There is no functional change.

Signed-off-by: Joonsoo Kim 


Acked-by: Vlastimil Babka 



Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-10 Thread Sergey Senozhatsky
Hello,

On (08/10/16 15:16), js1...@gmail.com wrote:
[..]
> -static inline void set_page_guard(struct zone *zone, struct page *page,
> +static inline bool set_page_guard(struct zone *zone, struct page *page,
>                                 unsigned int order, int migratetype)
>  {
>         struct page_ext *page_ext;
>  
>         if (!debug_guardpage_enabled())
> -               return;
> +               return false;
> +
> +       if (order >= debug_guardpage_minorder())
> +               return false;
>  
>         page_ext = lookup_page_ext(page);
>         if (unlikely(!page_ext))
> -               return;
> +               return false;
>  
>         __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
>  
> @@ -656,6 +659,8 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
>         set_page_private(page, order);
>         /* Guard pages are not available for any usage */
>         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
> +
> +       return true;
>  }
>  
>  static inline void clear_page_guard(struct zone *zone, struct page *page,
> @@ -678,8 +683,8 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
>  }
>  #else
>  struct page_ext_operations debug_guardpage_ops = { NULL, };
> -static inline void set_page_guard(struct zone *zone, struct page *page,
> -                       unsigned int order, int migratetype) {}
> +static inline bool set_page_guard(struct zone *zone, struct page *page,
> +                       unsigned int order, int migratetype) { return false; }
>  static inline void clear_page_guard(struct zone *zone, struct page *page,
>                                 unsigned int order, int migratetype) {}
>  #endif
> @@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
>                 size >>= 1;
>                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
>  
> -               if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
> -                       debug_guardpage_enabled() &&
> -                       high < debug_guardpage_minorder()) {
> -                       /*
> -                        * Mark as guard pages (or page), that will allow to
> -                        * merge back to allocator when buddy will be freed.
> -                        * Corresponding page table entries will not be touched,
> -                        * pages will stay not present in virtual address space
> -                        */
> -                       set_page_guard(zone, &page[size], high, migratetype);
> +               /*
> +                * Mark as guard pages (or page), that will allow to
> +                * merge back to allocator when buddy will be freed.
> +                * Corresponding page table entries will not be touched,
> +                * pages will stay not present in virtual address space
> +                */
> +               if (set_page_guard(zone, &page[size], high, migratetype))
>                         continue;
> -               }

so previously IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) could have optimized out
the entire branch -- no set_page_guard() invocation and checks, right? but
now we would call set_page_guard() every time?

-ss

> +
>                 list_add(&page[size].lru, &area->free_list[migratetype]);
>                 area->nr_free++;
>                 set_page_order(&page[size], high);



[PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

2016-08-10 Thread js1304
From: Joonsoo Kim 

We can clean up the code by moving the decision conditions for
set_page_guard() into set_page_guard() itself. This improves
readability. There is no functional change.

Signed-off-by: Joonsoo Kim 
---
 mm/page_alloc.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 277c3d0..5e7944b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -638,17 +638,20 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 
-static inline void set_page_guard(struct zone *zone, struct page *page,
+static inline bool set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
 {
        struct page_ext *page_ext;
 
        if (!debug_guardpage_enabled())
-               return;
+               return false;
+
+       if (order >= debug_guardpage_minorder())
+               return false;
 
        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
-               return;
+               return false;
 
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
@@ -656,6 +659,8 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
+
+       return true;
 }
 
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -678,8 +683,8 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 }
 #else
 struct page_ext_operations debug_guardpage_ops = { NULL, };
-static inline void set_page_guard(struct zone *zone, struct page *page,
-                       unsigned int order, int migratetype) {}
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+                       unsigned int order, int migratetype) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
 #endif
@@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
-               if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
-                       debug_guardpage_enabled() &&
-                       high < debug_guardpage_minorder()) {
-                       /*
-                        * Mark as guard pages (or page), that will allow to
-                        * merge back to allocator when buddy will be freed.
-                        * Corresponding page table entries will not be touched,
-                        * pages will stay not present in virtual address space
-                        */
-                       set_page_guard(zone, &page[size], high, migratetype);
+               /*
+                * Mark as guard pages (or page), that will allow to
+                * merge back to allocator when buddy will be freed.
+                * Corresponding page table entries will not be touched,
+                * pages will stay not present in virtual address space
+                */
+               if (set_page_guard(zone, &page[size], high, migratetype))
                        continue;
-               }
+
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
-- 
1.9.1
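
The shape of this cleanup, reduced to a self-contained sketch (all names hypothetical, not the kernel API): move the gating conditions into the callee and let its bool return value tell each caller whether to skip the fallback path.

#include <stdbool.h>

/* Hypothetical stand-ins for debug_guardpage_enabled() and the
 * debug_guardpage_minorder() comparison. */
static bool feature_enabled(void) { return false; }
static bool order_ok(unsigned int order) { return order < 1; }
static void mark_guard(void) { }
static void fallback(void) { }

/* Before: the caller owns all of the gating conditions. */
static void caller_before(unsigned int order)
{
        if (feature_enabled() && order_ok(order)) {
                mark_guard();
                return;                 /* guard set, skip the fallback */
        }
        fallback();
}

/* After: the callee gates itself and reports success, so the
 * call site collapses to a single if. */
static inline bool mark_guard_checked(unsigned int order)
{
        if (!feature_enabled())
                return false;
        if (!order_ok(order))
                return false;
        mark_guard();
        return true;
}

static void caller_after(unsigned int order)
{
        if (mark_guard_checked(order))
                return;
        fallback();
}

Both callers behave identically; the second merely centralizes the decision, which is why the patch can claim no functional change.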


