Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread Naoya Horiguchi
On Thu, Mar 03, 2016 at 01:40:51PM +0300, Kirill A. Shutemov wrote:
> On Thu, Mar 03, 2016 at 04:41:50PM +0900, Naoya Horiguchi wrote:
> > This patch prepares thp migration's core code. These code will be open when
> > unmap_and_move() stops unconditionally splitting thp and get_new_page() 
> > starts
> > to allocate destination thps.
> >
> > Signed-off-by: Naoya Horiguchi 
> > ---
> >  arch/x86/include/asm/pgtable.h| 11 ++
> >  arch/x86/include/asm/pgtable_64.h |  2 +
> >  include/linux/swapops.h   | 62 +++
> >  mm/huge_memory.c  | 78 
> > +++
> >  mm/migrate.c  | 23 
> >  5 files changed, 176 insertions(+)
> >
> > diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> > index 0687c47..0df9afe 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
> > +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> > @@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
> > return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
> >  }
> >
> > +/*
> > + * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
> > + * Combined with is_migration_entry(), this routine is used to detect pmd
> > + * migration entries. To make it work fine, callers should make sure that
> > + * pmd_trans_huge() returns true beforehand.
> > + */
>
Hm. I don't think this would fly. What prevents false positives for PROT_NONE
pmds?

Nothing actually if we use __pmd_present alone. __pmd_present() is now used
only via is_pmd_migration_entry() combined with is_migration_entry(), and
is_migration_entry() should return false for PROT_NONE pmds (because
is_migration_entry() requires characteristic bits SWP_MIGRATION_READ|WRITE,
and they aren't compatible.) But I admit it might not be robust enough.

>
> I guess the problem is _PAGE_PSE, right? I don't really understand why we
> need it in pmd_present().

Yes, _PAGE_PSE in pmd_present() makes this branching harder/complicated.
Some simplification seems necessary.

>
> Andrea?
>
> > +static inline int __pmd_present(pmd_t pmd)
> > +{
> > +   return pmd_flags(pmd) & _PAGE_PRESENT;
> > +}
> > +
> >  #ifdef CONFIG_NUMA_BALANCING
> >  /*
> >   * These work without NUMA balancing but the kernel does not care. See the
> > diff --git 
> > v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> > index 2ee7811..df869d0 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
> > +++ 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> > @@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
> >  ((type) << (_PAGE_BIT_PRESENT + 1)) \
> >  | ((offset) << SWP_OFFSET_SHIFT) })
> >  #define __pte_to_swp_entry(pte)((swp_entry_t) { pte_val((pte)) 
> > })
> > +#define __pmd_to_swp_entry(pte)((swp_entry_t) { pmd_val((pmd)) 
> > })
> >  #define __swp_entry_to_pte(x)  ((pte_t) { .pte = (x).val })
> > +#define __swp_entry_to_pmd(x)  ((pmd_t) { .pmd = (x).val })
> >
> >  extern int kern_addr_valid(unsigned long addr);
> >  extern void cleanup_highmap(void);
> > diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> > index 5c3a5f3..b402a2c 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
> > +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> > @@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
> > entry)
> >
> >  #endif
> >
> > +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> > +extern int set_pmd_migration_entry(struct page *page,
> > +   struct mm_struct *mm, unsigned long address);
> > +
> > +extern int remove_migration_pmd(struct page *new,
> > +   struct vm_area_struct *vma, unsigned long addr, void *old);
> > +
> > +extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
> > +
> > +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> > +{
> > +   swp_entry_t arch_entry;
> > +
> > +   arch_entry = __pmd_to_swp_entry(pmd);
> > +   return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
> > +}
> > +
> > +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> > +{
> > +   swp_entry_t arch_entry;
> > +
> > +   arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
> > +   return __swp_entry_to_pmd(arch_entry);
> > +}
> > +
> > +static inline int is_pmd_migration_entry(pmd_t pmd)
> > +{
> > +   return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
> > +}
> > +#else
> > +static inline int 

Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread Naoya Horiguchi
On Thu, Mar 03, 2016 at 01:40:51PM +0300, Kirill A. Shutemov wrote:
> On Thu, Mar 03, 2016 at 04:41:50PM +0900, Naoya Horiguchi wrote:
> > This patch prepares thp migration's core code. These code will be open when
> > unmap_and_move() stops unconditionally splitting thp and get_new_page() 
> > starts
> > to allocate destination thps.
> >
> > Signed-off-by: Naoya Horiguchi 
> > ---
> >  arch/x86/include/asm/pgtable.h| 11 ++
> >  arch/x86/include/asm/pgtable_64.h |  2 +
> >  include/linux/swapops.h   | 62 +++
> >  mm/huge_memory.c  | 78 
> > +++
> >  mm/migrate.c  | 23 
> >  5 files changed, 176 insertions(+)
> >
> > diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> > index 0687c47..0df9afe 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
> > +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> > @@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
> > return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
> >  }
> >
> > +/*
> > + * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
> > + * Combined with is_migration_entry(), this routine is used to detect pmd
> > + * migration entries. To make it work fine, callers should make sure that
> > + * pmd_trans_huge() returns true beforehand.
> > + */
>
Hm. I don't think this would fly. What prevents false positives for PROT_NONE
pmds?

Nothing actually if we use __pmd_present alone. __pmd_present() is now used
only via is_pmd_migration_entry() combined with is_migration_entry(), and
is_migration_entry() should return false for PROT_NONE pmds (because
is_migration_entry() requires characteristic bits SWP_MIGRATION_READ|WRITE,
and they aren't compatible.) But I admit it might not be robust enough.

>
> I guess the problem is _PAGE_PSE, right? I don't really understand why we
> need it in pmd_present().

Yes, _PAGE_PSE in pmd_present() makes this branching harder/complicated.
Some simplification seems necessary.

>
> Andrea?
>
> > +static inline int __pmd_present(pmd_t pmd)
> > +{
> > +   return pmd_flags(pmd) & _PAGE_PRESENT;
> > +}
> > +
> >  #ifdef CONFIG_NUMA_BALANCING
> >  /*
> >   * These work without NUMA balancing but the kernel does not care. See the
> > diff --git 
> > v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> > index 2ee7811..df869d0 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
> > +++ 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> > @@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
> >  ((type) << (_PAGE_BIT_PRESENT + 1)) \
> >  | ((offset) << SWP_OFFSET_SHIFT) })
> >  #define __pte_to_swp_entry(pte)((swp_entry_t) { pte_val((pte)) 
> > })
> > +#define __pmd_to_swp_entry(pte)((swp_entry_t) { pmd_val((pmd)) 
> > })
> >  #define __swp_entry_to_pte(x)  ((pte_t) { .pte = (x).val })
> > +#define __swp_entry_to_pmd(x)  ((pmd_t) { .pmd = (x).val })
> >
> >  extern int kern_addr_valid(unsigned long addr);
> >  extern void cleanup_highmap(void);
> > diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
> > v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> > index 5c3a5f3..b402a2c 100644
> > --- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
> > +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> > @@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
> > entry)
> >
> >  #endif
> >
> > +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> > +extern int set_pmd_migration_entry(struct page *page,
> > +   struct mm_struct *mm, unsigned long address);
> > +
> > +extern int remove_migration_pmd(struct page *new,
> > +   struct vm_area_struct *vma, unsigned long addr, void *old);
> > +
> > +extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
> > +
> > +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> > +{
> > +   swp_entry_t arch_entry;
> > +
> > +   arch_entry = __pmd_to_swp_entry(pmd);
> > +   return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
> > +}
> > +
> > +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> > +{
> > +   swp_entry_t arch_entry;
> > +
> > +   arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
> > +   return __swp_entry_to_pmd(arch_entry);
> > +}
> > +
> > +static inline int is_pmd_migration_entry(pmd_t pmd)
> > +{
> > +   return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
> > +}
> > +#else
> > +static inline int set_pmd_migration_entry(struct page 

Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread Kirill A. Shutemov
On Thu, Mar 03, 2016 at 04:41:50PM +0900, Naoya Horiguchi wrote:
> This patch prepares thp migration's core code. These code will be open when
> unmap_and_move() stops unconditionally splitting thp and get_new_page() starts
> to allocate destination thps.
> 
> Signed-off-by: Naoya Horiguchi 
> ---
>  arch/x86/include/asm/pgtable.h| 11 ++
>  arch/x86/include/asm/pgtable_64.h |  2 +
>  include/linux/swapops.h   | 62 +++
>  mm/huge_memory.c  | 78 
> +++
>  mm/migrate.c  | 23 
>  5 files changed, 176 insertions(+)
> 
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> index 0687c47..0df9afe 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> @@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
>   return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
>  }
>  
> +/*
> + * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
> + * Combined with is_migration_entry(), this routine is used to detect pmd
> + * migration entries. To make it work fine, callers should make sure that
> + * pmd_trans_huge() returns true beforehand.
> + */

Hm. I don't think this would fly. What prevents false positives for PROT_NONE
pmds?

I guess the problem is _PAGE_PSE, right? I don't really understand why we
need it in pmd_present().

Andrea?

> +static inline int __pmd_present(pmd_t pmd)
> +{
> + return pmd_flags(pmd) & _PAGE_PRESENT;
> +}
> +
>  #ifdef CONFIG_NUMA_BALANCING
>  /*
>   * These work without NUMA balancing but the kernel does not care. See the
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> index 2ee7811..df869d0 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> @@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
>((type) << (_PAGE_BIT_PRESENT + 1)) \
>| ((offset) << SWP_OFFSET_SHIFT) })
>  #define __pte_to_swp_entry(pte)  ((swp_entry_t) { pte_val((pte)) 
> })
> +#define __pmd_to_swp_entry(pte)  ((swp_entry_t) { pmd_val((pmd)) 
> })
>  #define __swp_entry_to_pte(x)((pte_t) { .pte = (x).val })
> +#define __swp_entry_to_pmd(x)((pmd_t) { .pmd = (x).val })
>  
>  extern int kern_addr_valid(unsigned long addr);
>  extern void cleanup_highmap(void);
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> index 5c3a5f3..b402a2c 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> @@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
> entry)
>  
>  #endif
>  
> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> +extern int set_pmd_migration_entry(struct page *page,
> + struct mm_struct *mm, unsigned long address);
> +
> +extern int remove_migration_pmd(struct page *new,
> + struct vm_area_struct *vma, unsigned long addr, void *old);
> +
> +extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
> +
> +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> +{
> + swp_entry_t arch_entry;
> +
> + arch_entry = __pmd_to_swp_entry(pmd);
> + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
> +}
> +
> +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> +{
> + swp_entry_t arch_entry;
> +
> + arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
> + return __swp_entry_to_pmd(arch_entry);
> +}
> +
> +static inline int is_pmd_migration_entry(pmd_t pmd)
> +{
> + return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
> +}
> +#else
> +static inline int set_pmd_migration_entry(struct page *page,
> + struct mm_struct *mm, unsigned long address)
> +{
> + return 0;
> +}
> +
> +static inline int remove_migration_pmd(struct page *new,
> + struct vm_area_struct *vma, unsigned long addr, void *old)
> +{
> + return 0;
> +}
> +
> +static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { 
> }
> +
> +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> +{
> + return swp_entry(0, 0);
> +}
> +
> +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> +{
> + pmd_t pmd = {};
> +
> + return pmd;
> +}
> +
> +static inline int is_pmd_migration_entry(pmd_t pmd)
> +{
> + return 0;
> +}
> +#endif
> +
>  

Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread Kirill A. Shutemov
On Thu, Mar 03, 2016 at 04:41:50PM +0900, Naoya Horiguchi wrote:
> This patch prepares thp migration's core code. These code will be open when
> unmap_and_move() stops unconditionally splitting thp and get_new_page() starts
> to allocate destination thps.
> 
> Signed-off-by: Naoya Horiguchi 
> ---
>  arch/x86/include/asm/pgtable.h| 11 ++
>  arch/x86/include/asm/pgtable_64.h |  2 +
>  include/linux/swapops.h   | 62 +++
>  mm/huge_memory.c  | 78 
> +++
>  mm/migrate.c  | 23 
>  5 files changed, 176 insertions(+)
> 
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> index 0687c47..0df9afe 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
> @@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
>   return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
>  }
>  
> +/*
> + * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
> + * Combined with is_migration_entry(), this routine is used to detect pmd
> + * migration entries. To make it work fine, callers should make sure that
> + * pmd_trans_huge() returns true beforehand.
> + */

Hm. I don't think this would fly. What prevents false positives for PROT_NONE
pmds?

I guess the problem is _PAGE_PSE, right? I don't really understand why we
need it in pmd_present().

Andrea?

> +static inline int __pmd_present(pmd_t pmd)
> +{
> + return pmd_flags(pmd) & _PAGE_PRESENT;
> +}
> +
>  #ifdef CONFIG_NUMA_BALANCING
>  /*
>   * These work without NUMA balancing but the kernel does not care. See the
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> index 2ee7811..df869d0 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
> @@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
>((type) << (_PAGE_BIT_PRESENT + 1)) \
>| ((offset) << SWP_OFFSET_SHIFT) })
>  #define __pte_to_swp_entry(pte)  ((swp_entry_t) { pte_val((pte)) 
> })
> +#define __pmd_to_swp_entry(pte)  ((swp_entry_t) { pmd_val((pmd)) 
> })
>  #define __swp_entry_to_pte(x)((pte_t) { .pte = (x).val })
> +#define __swp_entry_to_pmd(x)((pmd_t) { .pmd = (x).val })
>  
>  extern int kern_addr_valid(unsigned long addr);
>  extern void cleanup_highmap(void);
> diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
> v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> index 5c3a5f3..b402a2c 100644
> --- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
> +++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
> @@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
> entry)
>  
>  #endif
>  
> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> +extern int set_pmd_migration_entry(struct page *page,
> + struct mm_struct *mm, unsigned long address);
> +
> +extern int remove_migration_pmd(struct page *new,
> + struct vm_area_struct *vma, unsigned long addr, void *old);
> +
> +extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
> +
> +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> +{
> + swp_entry_t arch_entry;
> +
> + arch_entry = __pmd_to_swp_entry(pmd);
> + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
> +}
> +
> +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> +{
> + swp_entry_t arch_entry;
> +
> + arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
> + return __swp_entry_to_pmd(arch_entry);
> +}
> +
> +static inline int is_pmd_migration_entry(pmd_t pmd)
> +{
> + return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
> +}
> +#else
> +static inline int set_pmd_migration_entry(struct page *page,
> + struct mm_struct *mm, unsigned long address)
> +{
> + return 0;
> +}
> +
> +static inline int remove_migration_pmd(struct page *new,
> + struct vm_area_struct *vma, unsigned long addr, void *old)
> +{
> + return 0;
> +}
> +
> +static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { 
> }
> +
> +static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
> +{
> + return swp_entry(0, 0);
> +}
> +
> +static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
> +{
> + pmd_t pmd = {};
> +
> + return pmd;
> +}
> +
> +static inline int is_pmd_migration_entry(pmd_t pmd)
> +{
> + return 0;
> +}
> +#endif
> +
>  #ifdef CONFIG_MEMORY_FAILURE

Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread kbuild test robot
Hi Naoya,

[auto build test ERROR on v4.5-rc6]
[also build test ERROR on next-20160303]
[cannot apply to tip/x86/core asm-generic/master]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improving the system]

url:
https://github.com/0day-ci/linux/commits/Naoya-Horiguchi/mm-page-migration-enhancement-for-thp/20160303-154610
config: arm-at91_dt_defconfig (attached as .config)
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=arm 

All errors (new ones prefixed by >>):

   In file included from mm/vmscan.c:54:0:
   include/linux/swapops.h: In function 'swp_entry_to_pmd':
>> include/linux/swapops.h:217:2: error: empty scalar initializer
 pmd_t pmd = {};
 ^
   include/linux/swapops.h:217:2: error: (near initialization for 'pmd')

vim +217 include/linux/swapops.h

   211  {
   212  return swp_entry(0, 0);
   213  }
   214  
   215  static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
   216  {
 > 217  pmd_t pmd = {};
   218  
   219  return pmd;
   220  }

---
0-DAY kernel test infrastructureOpen Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation


.config.gz
Description: Binary data


Re: [PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-03 Thread kbuild test robot
Hi Naoya,

[auto build test ERROR on v4.5-rc6]
[also build test ERROR on next-20160303]
[cannot apply to tip/x86/core asm-generic/master]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improving the system]

url:
https://github.com/0day-ci/linux/commits/Naoya-Horiguchi/mm-page-migration-enhancement-for-thp/20160303-154610
config: arm-at91_dt_defconfig (attached as .config)
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=arm 

All errors (new ones prefixed by >>):

   In file included from mm/vmscan.c:54:0:
   include/linux/swapops.h: In function 'swp_entry_to_pmd':
>> include/linux/swapops.h:217:2: error: empty scalar initializer
 pmd_t pmd = {};
 ^
   include/linux/swapops.h:217:2: error: (near initialization for 'pmd')

vim +217 include/linux/swapops.h

   211  {
   212  return swp_entry(0, 0);
   213  }
   214  
   215  static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
   216  {
 > 217  pmd_t pmd = {};
   218  
   219  return pmd;
   220  }

---
0-DAY kernel test infrastructureOpen Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation


.config.gz
Description: Binary data


[PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-02 Thread Naoya Horiguchi
This patch prepares thp migration's core code. This code will be enabled when
unmap_and_move() stops unconditionally splitting thp and get_new_page() starts
to allocate destination thps.

Signed-off-by: Naoya Horiguchi 
---
 arch/x86/include/asm/pgtable.h| 11 ++
 arch/x86/include/asm/pgtable_64.h |  2 +
 include/linux/swapops.h   | 62 +++
 mm/huge_memory.c  | 78 +++
 mm/migrate.c  | 23 
 5 files changed, 176 insertions(+)

diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
index 0687c47..0df9afe 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
@@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 }
 
+/*
+ * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
+ * Combined with is_migration_entry(), this routine is used to detect pmd
+ * migration entries. To make it work fine, callers should make sure that
+ * pmd_trans_huge() returns true beforehand.
+ */
+static inline int __pmd_present(pmd_t pmd)
+{
+   return pmd_flags(pmd) & _PAGE_PRESENT;
+}
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * These work without NUMA balancing but the kernel does not care. See the
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
index 2ee7811..df869d0 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
@@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 ((type) << (_PAGE_BIT_PRESENT + 1)) \
 | ((offset) << SWP_OFFSET_SHIFT) })
 #define __pte_to_swp_entry(pte)((swp_entry_t) { pte_val((pte)) 
})
+#define __pmd_to_swp_entry(pte)((swp_entry_t) { pmd_val((pmd)) 
})
 #define __swp_entry_to_pte(x)  ((pte_t) { .pte = (x).val })
+#define __swp_entry_to_pmd(x)  ((pmd_t) { .pmd = (x).val })
 
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
index 5c3a5f3..b402a2c 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
@@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
entry)
 
 #endif
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+extern int set_pmd_migration_entry(struct page *page,
+   struct mm_struct *mm, unsigned long address);
+
+extern int remove_migration_pmd(struct page *new,
+   struct vm_area_struct *vma, unsigned long addr, void *old);
+
+extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+   swp_entry_t arch_entry;
+
+   arch_entry = __pmd_to_swp_entry(pmd);
+   return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+   swp_entry_t arch_entry;
+
+   arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+   return __swp_entry_to_pmd(arch_entry);
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+   return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+}
+#else
+static inline int set_pmd_migration_entry(struct page *page,
+   struct mm_struct *mm, unsigned long address)
+{
+   return 0;
+}
+
+static inline int remove_migration_pmd(struct page *new,
+   struct vm_area_struct *vma, unsigned long addr, void *old)
+{
+   return 0;
+}
+
+static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+   return swp_entry(0, 0);
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+   pmd_t pmd = {};
+
+   return pmd;
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+   return 0;
+}
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 
 extern atomic_long_t num_poisoned_pages __read_mostly;
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/mm/huge_memory.c 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/mm/huge_memory.c
index 46ad357..c6d5406 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/mm/huge_memory.c
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/mm/huge_memory.c
@@ -3657,3 +3657,81 @@ static int __init split_huge_pages_debugfs(void)
 }
 late_initcall(split_huge_pages_debugfs);
 #endif
+
+#ifdef 

[PATCH v1 03/11] mm: thp: add helpers related to thp/pmd migration

2016-03-02 Thread Naoya Horiguchi
This patch prepares thp migration's core code. This code will be enabled when
unmap_and_move() stops unconditionally splitting thp and get_new_page() starts
to allocate destination thps.

Signed-off-by: Naoya Horiguchi 
---
 arch/x86/include/asm/pgtable.h| 11 ++
 arch/x86/include/asm/pgtable_64.h |  2 +
 include/linux/swapops.h   | 62 +++
 mm/huge_memory.c  | 78 +++
 mm/migrate.c  | 23 
 5 files changed, 176 insertions(+)

diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
index 0687c47..0df9afe 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable.h
@@ -515,6 +515,17 @@ static inline int pmd_present(pmd_t pmd)
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 }
 
+/*
+ * Unlike pmd_present(), __pmd_present() checks only _PAGE_PRESENT bit.
+ * Combined with is_migration_entry(), this routine is used to detect pmd
+ * migration entries. To make it work fine, callers should make sure that
+ * pmd_trans_huge() returns true beforehand.
+ */
+static inline int __pmd_present(pmd_t pmd)
+{
+   return pmd_flags(pmd) & _PAGE_PRESENT;
+}
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * These work without NUMA balancing but the kernel does not care. See the
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
index 2ee7811..df869d0 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/arch/x86/include/asm/pgtable_64.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/arch/x86/include/asm/pgtable_64.h
@@ -153,7 +153,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 ((type) << (_PAGE_BIT_PRESENT + 1)) \
 | ((offset) << SWP_OFFSET_SHIFT) })
 #define __pte_to_swp_entry(pte)((swp_entry_t) { pte_val((pte)) 
})
+#define __pmd_to_swp_entry(pte)((swp_entry_t) { pmd_val((pmd)) 
})
 #define __swp_entry_to_pte(x)  ((pte_t) { .pte = (x).val })
+#define __swp_entry_to_pmd(x)  ((pmd_t) { .pmd = (x).val })
 
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
index 5c3a5f3..b402a2c 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/include/linux/swapops.h
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/include/linux/swapops.h
@@ -163,6 +163,68 @@ static inline int is_write_migration_entry(swp_entry_t 
entry)
 
 #endif
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+extern int set_pmd_migration_entry(struct page *page,
+   struct mm_struct *mm, unsigned long address);
+
+extern int remove_migration_pmd(struct page *new,
+   struct vm_area_struct *vma, unsigned long addr, void *old);
+
+extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+   swp_entry_t arch_entry;
+
+   arch_entry = __pmd_to_swp_entry(pmd);
+   return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+   swp_entry_t arch_entry;
+
+   arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+   return __swp_entry_to_pmd(arch_entry);
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+   return !__pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+}
+#else
+static inline int set_pmd_migration_entry(struct page *page,
+   struct mm_struct *mm, unsigned long address)
+{
+   return 0;
+}
+
+static inline int remove_migration_pmd(struct page *new,
+   struct vm_area_struct *vma, unsigned long addr, void *old)
+{
+   return 0;
+}
+
+static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+   return swp_entry(0, 0);
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+   pmd_t pmd = {};
+
+   return pmd;
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+   return 0;
+}
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 
 extern atomic_long_t num_poisoned_pages __read_mostly;
diff --git v4.5-rc5-mmotm-2016-02-24-16-18/mm/huge_memory.c 
v4.5-rc5-mmotm-2016-02-24-16-18_patched/mm/huge_memory.c
index 46ad357..c6d5406 100644
--- v4.5-rc5-mmotm-2016-02-24-16-18/mm/huge_memory.c
+++ v4.5-rc5-mmotm-2016-02-24-16-18_patched/mm/huge_memory.c
@@ -3657,3 +3657,81 @@ static int __init split_huge_pages_debugfs(void)
 }
 late_initcall(split_huge_pages_debugfs);
 #endif
+
+#ifdef