Re: [RFC PATCH V1 03/33] powerpc/mm: Switch book3s 64 with 64K page size to 4 level page table

2016-01-17 Thread Aneesh Kumar K.V
Balbir Singh  writes:

> On 12/01/16 18:15, Aneesh Kumar K.V wrote:
>> This is needed so that we can support both hash and radix page tables
>> using a single kernel. The radix kernel uses a 4-level table.
>>

.

>> diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h 
>> b/arch/powerpc/include/asm/book3s/64/hash-64k.h
>> index 849bbec80f7b..5c9392b71a6b 100644
>> --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
>> +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
>> @@ -1,15 +1,14 @@
>>  #ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
>>  #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
>>  
>> -#include 
>> -
>>  #define PTE_INDEX_SIZE  8
>> -#define PMD_INDEX_SIZE  10
>> -#define PUD_INDEX_SIZE  0
>> +#define PMD_INDEX_SIZE  5
>> +#define PUD_INDEX_SIZE  5
>>  #define PGD_INDEX_SIZE  12
>
>
> So the PMD index of 10 splits into 5 and 5 for PMD/PUD? Does this impact huge pages?


Nope. We have huge pages at the top (PGD) level and at the PMD level (16G and 16M respectively).
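
To spell out the arithmetic with the new split (a quick back-of-the-envelope
sketch, assuming a 64K base page, i.e. PAGE_SHIFT = 16; not part of the patch):

/*
 * PMD_SHIFT   = PAGE_SHIFT(16) + PTE_INDEX_SIZE(8) = 24 -> 16M  (hugepage at PMD level)
 * PUD_SHIFT   = PMD_SHIFT(24)  + PMD_INDEX_SIZE(5) = 29 -> 512M (per PUD entry)
 * PGDIR_SHIFT = PUD_SHIFT(29)  + PUD_INDEX_SIZE(5) = 34 -> 16G  (hugepage at PGD level)
 */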

>
>>  
>>  #define PTRS_PER_PTE(1 << PTE_INDEX_SIZE)
>>  #define PTRS_PER_PMD(1 << PMD_INDEX_SIZE)
>> +#define PTRS_PER_PUD(1 << PUD_INDEX_SIZE)
>>  #define PTRS_PER_PGD(1 << PGD_INDEX_SIZE)
>>  
>>  /* With 4k base page size, hugepage PTEs go at the PMD level */
>> @@ -20,8 +19,13 @@
>>  #define PMD_SIZE(1UL << PMD_SHIFT)
>>  #define PMD_MASK(~(PMD_SIZE-1))
>>  
>> +/* PUD_SHIFT determines what a third-level page table entry can map */
>> +#define PUD_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)
>> +#define PUD_SIZE(1UL << PUD_SHIFT)
>> +#define PUD_MASK(~(PUD_SIZE-1))
>> +
>>  /* PGDIR_SHIFT determines what a third-level page table entry can map */
>> -#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
>> +#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
>>  #define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
>>  #define PGDIR_MASK  (~(PGDIR_SIZE-1))
>>  
>> @@ -61,6 +65,8 @@
>>  #define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1)
>>  /* Bits to mask out from a PGD/PUD to get to the PMD page */
>>  #define PUD_MASKED_BITS 0x1ff
>> +/* FIXME!! check this */
>
> Shouldn't PUD_MASKED_BITS be 0x1f?
>
>> +#define PGD_MASKED_BITS 0
>>  
> 0?
>


The MASKED_BITS values need to be cleaned up, hence the FIXME!!. The Linux
page tables are aligned differently, and I didn't want to clean that up in
this series. IMHO, using hard-coded #defines like the above, instead of
deriving the value from the pmd table alignment, is wrong. Will get to that
later.
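
To illustrate the kind of derivation I mean, a sketch only (it assumes each
lower-level table is naturally aligned to its size, which is not quite true
for the hash linux page tables today, hence the hard-coded values in this
series):

/* Sketch, not part of this series: mask out the bits below the
 * alignment of the table the entry points to. */
#define PUD_MASKED_BITS	(PMD_TABLE_SIZE - 1)
#define PGD_MASKED_BITS	(PUD_TABLE_SIZE - 1)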



>>  #ifndef __ASSEMBLY__
>>  
>> @@ -130,11 +136,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned 
>> long index);
>>  #else
>>  #define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
>>  #endif
>> +#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
>>  #define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
>>  
>> -#define pgd_pte(pgd)(pud_pte(((pud_t){ pgd })))
>> -#define pte_pgd(pte)((pgd_t)pte_pud(pte))
>> -
>>  #ifdef CONFIG_HUGETLB_PAGE

-aneesh


Re: [RFC PATCH V1 03/33] powerpc/mm: Switch book3s 64 with 64K page size to 4 level page table

2016-01-14 Thread Balbir Singh


On 12/01/16 18:15, Aneesh Kumar K.V wrote:
> This is needed so that we can support both hash and radix page tables
> using a single kernel. The radix kernel uses a 4-level table.
>
> Signed-off-by: Aneesh Kumar K.V 
> ---
>  arch/powerpc/Kconfig  |  1 +
>  arch/powerpc/include/asm/book3s/64/hash-4k.h  | 33 +--
>  arch/powerpc/include/asm/book3s/64/hash-64k.h | 20 +---
>  arch/powerpc/include/asm/book3s/64/hash.h |  8 +++
>  arch/powerpc/include/asm/book3s/64/pgtable.h  | 25 +++-
>  arch/powerpc/include/asm/pgalloc-64.h | 24 ---
>  arch/powerpc/include/asm/pgtable-types.h  | 13 +++
>  arch/powerpc/mm/init_64.c | 21 -
>  8 files changed, 90 insertions(+), 55 deletions(-)
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 378f1127ca98..618afea4c9fc 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -303,6 +303,7 @@ config ZONE_DMA32
>  config PGTABLE_LEVELS
>   int
>   default 2 if !PPC64
> + default 4 if PPC_BOOK3S_64
>   default 3 if PPC_64K_PAGES
>   default 4
>  
> diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h 
> b/arch/powerpc/include/asm/book3s/64/hash-4k.h
> index ea0414d6659e..c78f5928001b 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
> @@ -57,39 +57,8 @@
>  #define _PAGE_4K_PFN 0
>  #ifndef __ASSEMBLY__
>  /*
> - * 4-level page tables related bits
> + * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
>   */
> -
> -#define pgd_none(pgd)(!pgd_val(pgd))
> -#define pgd_bad(pgd) (pgd_val(pgd) == 0)
> -#define pgd_present(pgd) (pgd_val(pgd) != 0)
> -#define pgd_page_vaddr(pgd)  (pgd_val(pgd) & ~PGD_MASKED_BITS)
> -
> -static inline void pgd_clear(pgd_t *pgdp)
> -{
> - *pgdp = __pgd(0);
> -}
> -
> -static inline pte_t pgd_pte(pgd_t pgd)
> -{
> - return __pte(pgd_val(pgd));
> -}
> -
> -static inline pgd_t pte_pgd(pte_t pte)
> -{
> - return __pgd(pte_val(pte));
> -}
> -extern struct page *pgd_page(pgd_t pgd);
> -
> -#define pud_offset(pgdp, addr)   \
> -  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
> -(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
> -
> -#define pud_ERROR(e) \
> - pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
> -
> -/*
> - * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
>  #define remap_4k_pfn(vma, addr, pfn, prot)   \
>   remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
>  
> diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h 
> b/arch/powerpc/include/asm/book3s/64/hash-64k.h
> index 849bbec80f7b..5c9392b71a6b 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
> @@ -1,15 +1,14 @@
>  #ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
>  #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
>  
> -#include 
> -
>  #define PTE_INDEX_SIZE  8
> -#define PMD_INDEX_SIZE  10
> -#define PUD_INDEX_SIZE   0
> +#define PMD_INDEX_SIZE  5
> +#define PUD_INDEX_SIZE   5
>  #define PGD_INDEX_SIZE  12


So the PMD index of 10 splits into 5 and 5 for PMD/PUD? Does this impact huge pages?

>  
>  #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
>  #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
> +#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
>  #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
>  
>  /* With 4k base page size, hugepage PTEs go at the PMD level */
> @@ -20,8 +19,13 @@
>  #define PMD_SIZE (1UL << PMD_SHIFT)
>  #define PMD_MASK (~(PMD_SIZE-1))
>  
> +/* PUD_SHIFT determines what a third-level page table entry can map */
> +#define PUD_SHIFT(PMD_SHIFT + PMD_INDEX_SIZE)
> +#define PUD_SIZE (1UL << PUD_SHIFT)
> +#define PUD_MASK (~(PUD_SIZE-1))
> +
>  /* PGDIR_SHIFT determines what a third-level page table entry can map */
> -#define PGDIR_SHIFT  (PMD_SHIFT + PMD_INDEX_SIZE)
> +#define PGDIR_SHIFT  (PUD_SHIFT + PUD_INDEX_SIZE)
>  #define PGDIR_SIZE   (1UL << PGDIR_SHIFT)
>  #define PGDIR_MASK   (~(PGDIR_SIZE-1))
>  
> @@ -61,6 +65,8 @@
>  #define PMD_MASKED_BITS  (PTE_FRAG_SIZE - 1)
>  /* Bits to mask out from a PGD/PUD to get to the PMD page */
>  #define PUD_MASKED_BITS  0x1ff
> +/* FIXME!! check this */

Shouldn't PUD_MASKED_BITS be 0x1f?

> +#define PGD_MASKED_BITS  0
>  
0?

>  #ifndef __ASSEMBLY__
>  
> @@ -130,11 +136,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned 
> long index);
>  #else
>  #define PMD_TABLE_SIZE   (sizeof(pmd_t) << PMD_INDEX_SIZE)
>  #endif
> +#define PUD_TABLE_SIZE   (sizeof(pud_t) << PUD_INDEX_SIZE)
>  #define PGD_TABLE_SIZE   (sizeof(pgd_t) << PGD_INDEX_SIZE)
>  
> -#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
> -#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
> -
>  #ifdef CONFIG_HUGETLB_PAGE
>  /*
>   * We have PGD_INDEX_SIZ = 12 and 

Re: [RFC PATCH V1 03/33] powerpc/mm: Switch book3s 64 with 64K page size to 4 level page table

2016-01-13 Thread Balbir Singh
On Tue, 12 Jan 2016 12:45:38 +0530
"Aneesh Kumar K.V"  wrote:

> This is needed so that we can support both hash and radix page tables
> using a single kernel. The radix kernel uses a 4-level table.
> 
> Signed-off-by: Aneesh Kumar K.V 
> ---
>  arch/powerpc/Kconfig  |  1 +
>  arch/powerpc/include/asm/book3s/64/hash-4k.h  | 33 +--
>  arch/powerpc/include/asm/book3s/64/hash-64k.h | 20 +---
>  arch/powerpc/include/asm/book3s/64/hash.h |  8 +++
>  arch/powerpc/include/asm/book3s/64/pgtable.h  | 25 +++-
>  arch/powerpc/include/asm/pgalloc-64.h | 24 ---
>  arch/powerpc/include/asm/pgtable-types.h  | 13 +++
>  arch/powerpc/mm/init_64.c | 21 -
>  8 files changed, 90 insertions(+), 55 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 378f1127ca98..618afea4c9fc 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -303,6 +303,7 @@ config ZONE_DMA32

snip
> -
>  #define PTE_INDEX_SIZE  8
> -#define PMD_INDEX_SIZE  10
> -#define PUD_INDEX_SIZE   0
> +#define PMD_INDEX_SIZE  5
> +#define PUD_INDEX_SIZE   5
>  #define PGD_INDEX_SIZE  12
>  

OK, so the PMD index split from 10 into 5 and 5 for PMD/PUD? What is the plan
for huge pages? I saw you mentioned it was a TODO.

>  #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
>  #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
> +#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
>  #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
>  
>  /* With 4k base page size, hugepage PTEs go at the PMD level */
> @@ -20,8 +19,13 @@
>  #define PMD_SIZE (1UL << PMD_SHIFT)
>  #define PMD_MASK (~(PMD_SIZE-1))
>  
> +/* PUD_SHIFT determines what a third-level page table entry can map */
> +#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
> +#define PUD_SIZE (1UL << PUD_SHIFT)
> +#define PUD_MASK (~(PUD_SIZE-1))
> +
>  /* PGDIR_SHIFT determines what a third-level page table entry can map */
> -#define PGDIR_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)
> +#define PGDIR_SHIFT  (PUD_SHIFT + PUD_INDEX_SIZE)
>  #define PGDIR_SIZE   (1UL << PGDIR_SHIFT)
>  #define PGDIR_MASK   (~(PGDIR_SIZE-1))
>  
> @@ -61,6 +65,8 @@
>  #define PMD_MASKED_BITS  (PTE_FRAG_SIZE - 1)
>  /* Bits to mask out from a PGD/PUD to get to the PMD page */

The comment looks like it applies to the PMD and not the PUD.
>  #define PUD_MASKED_BITS  0x1ff

Given that PUD is now 5 bits, this should be 0x1f?

> +/* FIXME!! check this */
> +#define PGD_MASKED_BITS  0
>  

PGD_MASKED_BITS is 0? Shouldn't it be 0xfe?

>  #ifndef __ASSEMBLY__
>  
> @@ -130,11 +136,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
>  #else
>  #define PMD_TABLE_SIZE   (sizeof(pmd_t) << PMD_INDEX_SIZE)
>  #endif
> +#define PUD_TABLE_SIZE   (sizeof(pud_t) << PUD_INDEX_SIZE)
>  #define PGD_TABLE_SIZE   (sizeof(pgd_t) << PGD_INDEX_SIZE)
>  
> -#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
> -#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
> -
>  #ifdef CONFIG_HUGETLB_PAGE
>  /*
>   * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h
> b/arch/powerpc/include/asm/book3s/64/hash.h
> index f46974d0134a..9ff1e056acef 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -226,6 +226,7 @@
>  #define pud_page_vaddr(pud)  (pud_val(pud) & ~PUD_MASKED_BITS)
>  
>  #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
> +#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
>  #define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
>  #define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
> @@ -354,8 +355,15 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
>  :"cc");
>  }
>  
> +static inline int pgd_bad(pgd_t pgd)
> +{
> + return (pgd_val(pgd) == 0);
> +}
> +
>  #define __HAVE_ARCH_PTE_SAME
>  #define pte_same(A,B)    (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
> +#define pgd_page_vaddr(pgd)  (pgd_val(pgd) & ~PGD_MASKED_BITS)
> +
>  
>  /* Generic accessors to PTE bits */
>  static inline int pte_write(pte_t pte)  { return !!(pte_val(pte) & _PAGE_RW);}
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h
> b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index e7162dba987e..8f639401c7ba 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -111,6 +111,26 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
>  *pgdp = __pgd(val);
>  }
> +static inline void pgd_clear(pgd_t *pgdp)
> +{
> + *pgdp = __pgd(0);
> +}
> +
> +#define pgd_none(pgd)(!pgd_val(pgd))
> +#define pgd_present(pgd) (!pgd_none(pgd))
> +
> +static inline 

[RFC PATCH V1 03/33] powerpc/mm: Switch book3s 64 with 64K page size to 4 level page table

2016-01-11 Thread Aneesh Kumar K.V
This is needed so that we can support both hash and radix page tables
using a single kernel. The radix kernel uses a 4-level table.

Signed-off-by: Aneesh Kumar K.V 
---
 arch/powerpc/Kconfig  |  1 +
 arch/powerpc/include/asm/book3s/64/hash-4k.h  | 33 +--
 arch/powerpc/include/asm/book3s/64/hash-64k.h | 20 +---
 arch/powerpc/include/asm/book3s/64/hash.h |  8 +++
 arch/powerpc/include/asm/book3s/64/pgtable.h  | 25 +++-
 arch/powerpc/include/asm/pgalloc-64.h | 24 ---
 arch/powerpc/include/asm/pgtable-types.h  | 13 +++
 arch/powerpc/mm/init_64.c | 21 -
 8 files changed, 90 insertions(+), 55 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 378f1127ca98..618afea4c9fc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -303,6 +303,7 @@ config ZONE_DMA32
 config PGTABLE_LEVELS
int
default 2 if !PPC64
+   default 4 if PPC_BOOK3S_64
default 3 if PPC_64K_PAGES
default 4
 
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h 
b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index ea0414d6659e..c78f5928001b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -57,39 +57,8 @@
 #define _PAGE_4K_PFN   0
 #ifndef __ASSEMBLY__
 /*
- * 4-level page tables related bits
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
  */
-
-#define pgd_none(pgd)  (!pgd_val(pgd))
-#define pgd_bad(pgd)   (pgd_val(pgd) == 0)
-#define pgd_present(pgd)   (pgd_val(pgd) != 0)
-#define pgd_page_vaddr(pgd)(pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-static inline void pgd_clear(pgd_t *pgdp)
-{
-   *pgdp = __pgd(0);
-}
-
-static inline pte_t pgd_pte(pgd_t pgd)
-{
-   return __pte(pgd_val(pgd));
-}
-
-static inline pgd_t pte_pgd(pte_t pte)
-{
-   return __pgd(pte_val(pte));
-}
-extern struct page *pgd_page(pgd_t pgd);
-
-#define pud_offset(pgdp, addr) \
-  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
-(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
-#define pud_ERROR(e) \
-   pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
-
-/*
- * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
 #define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
 
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h 
b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 849bbec80f7b..5c9392b71a6b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -1,15 +1,14 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
 #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
 
-#include 
-
 #define PTE_INDEX_SIZE  8
-#define PMD_INDEX_SIZE  10
-#define PUD_INDEX_SIZE 0
+#define PMD_INDEX_SIZE  5
+#define PUD_INDEX_SIZE 5
 #define PGD_INDEX_SIZE  12
 
 #define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD   (1 << PUD_INDEX_SIZE)
 #define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
 
 /* With 4k base page size, hugepage PTEs go at the PMD level */
@@ -20,8 +19,13 @@
 #define PMD_SIZE   (1UL << PMD_SHIFT)
 #define PMD_MASK   (~(PMD_SIZE-1))
 
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT  (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE   (1UL << PUD_SHIFT)
+#define PUD_MASK   (~(PUD_SIZE-1))
+
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SHIFT(PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
@@ -61,6 +65,8 @@
 #define PMD_MASKED_BITS(PTE_FRAG_SIZE - 1)
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
 #define PUD_MASKED_BITS0x1ff
+/* FIXME!! check this */
+#define PGD_MASKED_BITS0
 
 #ifndef __ASSEMBLY__
 
@@ -130,11 +136,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned 
long index);
 #else
 #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
-#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
-#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
-
 #ifdef CONFIG_HUGETLB_PAGE
 /*
  * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h 
b/arch/powerpc/include/asm/book3s/64/hash.h
index f46974d0134a..9ff1e056acef 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -226,6 +226,7 @@
 #define pud_page_vaddr(pud)(pud_val(pud) & ~PUD_MASKED_BITS)
 
 #define pgd_index(address) (((address)