Re: [PATCHv4 05/14] x86/xen: Drop 5-level paging support code from XEN_PV code

2017-08-09 Thread Juergen Gross
On 08/08/17 14:54, Kirill A. Shutemov wrote:
> It was decided 5-level paging is not going to be supported in XEN_PV.
> 
> Let's drop the dead code from XEN_PV code.
> 
> Signed-off-by: Kirill A. Shutemov 
> Cc: Juergen Gross 

Reviewed-by: Juergen Gross 
Tested-by: Juergen Gross 


Thanks,

Juergen


Re: [PATCHv4 05/14] x86/xen: Drop 5-level paging support code from XEN_PV code

2017-08-09 Thread Juergen Gross
On 08/08/17 14:54, Kirill A. Shutemov wrote:
> It was decided 5-level paging is not going to be supported in XEN_PV.
> 
> Let's drop the dead code from XEN_PV code.
> 
> Signed-off-by: Kirill A. Shutemov 
> Cc: Juergen Gross 

Reviewed-by: Juergen Gross 
Tested-by: Juergen Gross 


Thanks,

Juergen


[PATCHv4 05/14] x86/xen: Drop 5-level paging support code from XEN_PV code

2017-08-08 Thread Kirill A. Shutemov
It was decided 5-level paging is not going to be supported in XEN_PV.

Let's drop the dead code from XEN_PV code.

Signed-off-by: Kirill A. Shutemov 
Cc: Juergen Gross 
---
 arch/x86/xen/mmu_pv.c | 159 +++---
 1 file changed, 60 insertions(+), 99 deletions(-)

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e437714750f8..bc5fddd64217 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -469,7 +469,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
return pte_mfn_to_pfn(pud.pud);
@@ -558,7 +558,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 
xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -600,21 +600,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
 {
-   int i, nr, flush = 0;
+   int flush = 0;
+   pud_t *pud;
 
-   nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
-   for (i = 0; i < nr; i++) {
-   pud_t *pud;
 
-   if (p4d_none(p4d[i]))
-   continue;
+   if (p4d_none(*p4d))
+   return flush;
 
-   pud = pud_offset(&p4d[i], 0);
-   if (PTRS_PER_PUD > 1)
-   flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-   flush |= xen_pud_walk(mm, pud, func,
-   last && i == nr - 1, limit);
-   }
+   pud = pud_offset(p4d, 0);
+   if (PTRS_PER_PUD > 1)
+   flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+   flush |= xen_pud_walk(mm, pud, func, last, limit);
return flush;
 }
 
@@ -664,8 +660,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
continue;
 
 p4d = p4d_offset(&pgd[i], 0);
-   if (PTRS_PER_P4D > 1)
-   flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
 
@@ -1196,22 +1190,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
 {
pgd_t *pgd;
p4d_t *p4d;
-   unsigned int i;
bool unpin;
 
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
-   for (i = 0; i < PTRS_PER_P4D; i++) {
-   if (p4d_none(p4d[i]))
-   continue;
-   xen_cleanmfnmap_p4d(p4d + i, unpin);
-   }
-   if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-   set_pgd(pgd, __pgd(0));
-   xen_cleanmfnmap_free_pgtbl(p4d, unpin);
-   }
+   if (!p4d_none(*p4d))
+   xen_cleanmfnmap_p4d(p4d, unpin);
 }
 
 static void __init xen_pagetable_p2m_free(void)
@@ -1717,7 +1703,7 @@ static void xen_release_pmd(unsigned long pfn)
xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2054,13 +2040,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
  */
 void __init xen_relocate_p2m(void)
 {
-   phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+   phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
-   int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+   int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
pte_t *pt;
pmd_t *pmd;
pud_t *pud;
-   p4d_t *p4d = NULL;
pgd_t *pgd;
unsigned long *new_p2m;
int save_pud;
@@ -2070,11 +2055,7 @@ void __init xen_relocate_p2m(void)
n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
-   if (PTRS_PER_P4D > 1)
-   n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
-   else
-   n_p4d = 0;
-   n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+   n_frames = n_pte + n_pt + n_pmd + n_pud;
 
new_area = xen_find_free_area(PFN_PHYS(n_frames));
if (!new_area) {
@@ -2090,76 +2071,56 @@ void __init xen_relocate_p2m(void)
 * To avoid any possible virtual address collision, just use
 * 2 * PUD_SIZE for the new area.
 */
-   p4d_phys = new_area;
-   pud_phys = p4d_phys + PFN_PHYS(n_p4d);

[PATCHv4 05/14] x86/xen: Drop 5-level paging support code from XEN_PV code

2017-08-08 Thread Kirill A. Shutemov
It was decided 5-level paging is not going to be supported in XEN_PV.

Let's drop the dead code from XEN_PV code.

Signed-off-by: Kirill A. Shutemov 
Cc: Juergen Gross 
---
 arch/x86/xen/mmu_pv.c | 159 +++---
 1 file changed, 60 insertions(+), 99 deletions(-)

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e437714750f8..bc5fddd64217 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -469,7 +469,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
return pte_mfn_to_pfn(pud.pud);
@@ -558,7 +558,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 
xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -600,21 +600,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
 {
-   int i, nr, flush = 0;
+   int flush = 0;
+   pud_t *pud;
 
-   nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
-   for (i = 0; i < nr; i++) {
-   pud_t *pud;
 
-   if (p4d_none(p4d[i]))
-   continue;
+   if (p4d_none(*p4d))
+   return flush;
 
-   pud = pud_offset(&p4d[i], 0);
-   if (PTRS_PER_PUD > 1)
-   flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-   flush |= xen_pud_walk(mm, pud, func,
-   last && i == nr - 1, limit);
-   }
+   pud = pud_offset(p4d, 0);
+   if (PTRS_PER_PUD > 1)
+   flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+   flush |= xen_pud_walk(mm, pud, func, last, limit);
return flush;
 }
 
@@ -664,8 +660,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
continue;
 
 p4d = p4d_offset(&pgd[i], 0);
-   if (PTRS_PER_P4D > 1)
-   flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
 
@@ -1196,22 +1190,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
 {
pgd_t *pgd;
p4d_t *p4d;
-   unsigned int i;
bool unpin;
 
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
-   for (i = 0; i < PTRS_PER_P4D; i++) {
-   if (p4d_none(p4d[i]))
-   continue;
-   xen_cleanmfnmap_p4d(p4d + i, unpin);
-   }
-   if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-   set_pgd(pgd, __pgd(0));
-   xen_cleanmfnmap_free_pgtbl(p4d, unpin);
-   }
+   if (!p4d_none(*p4d))
+   xen_cleanmfnmap_p4d(p4d, unpin);
 }
 
 static void __init xen_pagetable_p2m_free(void)
@@ -1717,7 +1703,7 @@ static void xen_release_pmd(unsigned long pfn)
xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2054,13 +2040,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
  */
 void __init xen_relocate_p2m(void)
 {
-   phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+   phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
-   int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+   int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
pte_t *pt;
pmd_t *pmd;
pud_t *pud;
-   p4d_t *p4d = NULL;
pgd_t *pgd;
unsigned long *new_p2m;
int save_pud;
@@ -2070,11 +2055,7 @@ void __init xen_relocate_p2m(void)
n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
-   if (PTRS_PER_P4D > 1)
-   n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
-   else
-   n_p4d = 0;
-   n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+   n_frames = n_pte + n_pt + n_pmd + n_pud;
 
new_area = xen_find_free_area(PFN_PHYS(n_frames));
if (!new_area) {
@@ -2090,76 +2071,56 @@ void __init xen_relocate_p2m(void)
 * To avoid any possible virtual address collision, just use
 * 2 * PUD_SIZE for the new area.
 */
-   p4d_phys = new_area;
-   pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+   pud_phys = new_area;
pmd_phys