Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

2020-08-09 Thread Jürgen Groß

On 09.08.20 04:22, Boris Ostrovsky wrote:

On 8/7/20 4:38 AM, Juergen Gross wrote:

With support for 32-bit pv guests gone pure pv-code no longer needs to
test for highmem. Dropping those tests removes the need for flushing
in some places.

Signed-off-by: Juergen Gross 



Reviewed-by: Boris Ostrovsky 


with a suggestion



---
  arch/x86/xen/enlighten_pv.c |  11 ++-
  arch/x86/xen/mmu_pv.c   | 138 ++--
  2 files changed, 57 insertions(+), 92 deletions(-)

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 7d90b3da8bb4..9fec952f84f3 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
unsigned long pfn;
struct page *page;
unsigned char dummy;
+   void *av;



to rename this to va since you are modifying those lines anyway.


Yes.


Juergen


Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

2020-08-08 Thread Boris Ostrovsky
On 8/7/20 4:38 AM, Juergen Gross wrote:
> With support for 32-bit pv guests gone pure pv-code no longer needs to
> test for highmem. Dropping those tests removes the need for flushing
> in some places.
>
> Signed-off-by: Juergen Gross 


Reviewed-by: Boris Ostrovsky 


with a suggestion


> ---
>  arch/x86/xen/enlighten_pv.c |  11 ++-
>  arch/x86/xen/mmu_pv.c   | 138 ++--
>  2 files changed, 57 insertions(+), 92 deletions(-)
>
> diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
> index 7d90b3da8bb4..9fec952f84f3 100644
> --- a/arch/x86/xen/enlighten_pv.c
> +++ b/arch/x86/xen/enlighten_pv.c
> @@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
>   unsigned long pfn;
>   struct page *page;
>   unsigned char dummy;
> + void *av;


to rename this to va since you are modifying those lines anyway.


>  
>   ptep = lookup_address((unsigned long)v, &level);
>   BUG_ON(ptep == NULL);
> @@ -383,14 +384,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
>   if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
>   BUG();
>  
> - if (!PageHighMem(page)) {
> - void *av = __va(PFN_PHYS(pfn));
> + av = __va(PFN_PHYS(pfn));
>  




Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

2020-08-07 Thread kernel test robot
Hi Juergen,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/x86/core]
[also build test WARNING on tip/x86/asm v5.8 next-20200806]
[cannot apply to xen-tip/linux-next tip/x86/vdso]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Juergen-Gross/Remove-32-bit-Xen-PV-guest-support/20200807-164058
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 
ef2ff0f5d6008d325c9a068e20981c0d0acc4d6b
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# save the attached .config to linux build tree
make W=1 ARCH=x86_64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All warnings (new ones prefixed by >>):

   arch/x86/xen/enlighten_pv.c: In function 'set_aliased_prot':
>> arch/x86/xen/enlighten_pv.c:348:15: warning: variable 'page' set but not 
>> used [-Wunused-but-set-variable]
 348 |  struct page *page;
 |   ^~~~
   arch/x86/xen/enlighten_pv.c: At top level:
   arch/x86/xen/enlighten_pv.c:1198:34: warning: no previous prototype for 
'xen_start_kernel' [-Wmissing-prototypes]
1198 | asmlinkage __visible void __init xen_start_kernel(void)
 |  ^~~~

vim +/page +348 arch/x86/xen/enlighten_pv.c

e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  335  
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  336  /*
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  337   * Set the page permissions 
for a particular virtual address.  If the
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  338   * address is a vmalloc 
mapping (or other non-linear mapping), then
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  339   * find the linear mapping 
of the page and also set its protections to
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  340   * match.
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  341   */
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  342  static void 
set_aliased_prot(void *v, pgprot_t prot)
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  343  {
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  344   int level;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  345   pte_t *ptep;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  346   pte_t pte;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  347   unsigned long pfn;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14 @348   struct page *page;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  349   unsigned char dummy;
64aef3716eab524 Juergen Gross 2020-08-07  350   void *av;
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  351  
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  352   ptep = 
lookup_address((unsigned long)v, &level);
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  353   BUG_ON(ptep == NULL);
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  354  
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  355   pfn = pte_pfn(*ptep);
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  356   page = pfn_to_page(pfn);
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  357  
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  358   pte = pfn_pte(pfn, 
prot);
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  359  
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  360   /*
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  361* Careful: 
update_va_mapping() will fail if the virtual address
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  362* we're poking isn't 
populated in the page tables.  We don't
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  363* need to worry about 
the direct map (that's always in the page
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  364* tables), but we need 
to be careful about vmap space.  In
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  365* particular, the top 
level page table can lazily propagate
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  366* entries between 
processes, so if we've switched mms since we
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  367* vmapped the target 
in the first place, we might not have the
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  368* top-level page table 
entry populated.
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  369*
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  370* We disable 
preemption because we want the same mm active when
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  371* we probe the target 
and when we issue the hypercall.  We'll
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  372* have the same 
nominal mm, but if we're a kernel thread, lazy
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  373* mm dropping could 
change our pgd.
e1dab14cf68d1e0 Vitaly Kuznetsov  2017-03-14  374*

[PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

2020-08-07 Thread Juergen Gross
With support for 32-bit pv guests gone pure pv-code no longer needs to
test for highmem. Dropping those tests removes the need for flushing
in some places.

Signed-off-by: Juergen Gross 
---
 arch/x86/xen/enlighten_pv.c |  11 ++-
 arch/x86/xen/mmu_pv.c   | 138 ++--
 2 files changed, 57 insertions(+), 92 deletions(-)

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 7d90b3da8bb4..9fec952f84f3 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
unsigned long pfn;
struct page *page;
unsigned char dummy;
+   void *av;
 
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -383,14 +384,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
 
-   if (!PageHighMem(page)) {
-   void *av = __va(PFN_PHYS(pfn));
+   av = __va(PFN_PHYS(pfn));
 
-   if (av != v)
-   if (HYPERVISOR_update_va_mapping((unsigned long)av, 
pte, 0))
-   BUG();
-   } else
-   kmap_flush_unused();
+   if (av != v && HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+   BUG();
 
preempt_enable();
 }
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1f9500d0b839..3774fa6d2ef7 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -537,25 +537,26 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
 #endif  /* CONFIG_PGTABLE_LEVELS >= 5 */
 
-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-   int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-   bool last, unsigned long limit)
+static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+bool last, unsigned long limit)
 {
-   int i, nr, flush = 0;
+   int i, nr;
 
nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
for (i = 0; i < nr; i++) {
if (!pmd_none(pmd[i]))
-   flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+   (*func)(mm, pmd_page(pmd[i]), PT_PTE);
}
-   return flush;
 }
 
-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-   int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-   bool last, unsigned long limit)
+static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+bool last, unsigned long limit)
 {
-   int i, nr, flush = 0;
+   int i, nr;
 
nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
for (i = 0; i < nr; i++) {
@@ -566,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
 
pmd = pmd_offset(&pud[i], 0);
if (PTRS_PER_PMD > 1)
-   flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
-   flush |= xen_pmd_walk(mm, pmd, func,
-   last && i == nr - 1, limit);
+   (*func)(mm, virt_to_page(pmd), PT_PMD);
+   xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
}
-   return flush;
 }
 
-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-   int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-   bool last, unsigned long limit)
+static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+bool last, unsigned long limit)
 {
-   int flush = 0;
pud_t *pud;
 
 
if (p4d_none(*p4d))
-   return flush;
+   return;
 
pud = pud_offset(p4d, 0);
if (PTRS_PER_PUD > 1)
-   flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-   flush |= xen_pud_walk(mm, pud, func, last, limit);
-   return flush;
+   (*func)(mm, virt_to_page(pud), PT_PUD);
+   xen_pud_walk(mm, pud, func, last, limit);
 }
 
 /*
@@ -603,12 +601,12 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
  * We must skip the Xen hole in the middle of the address space, just after
  * the big x86-64 virtual hole.
  */
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
- int (*func)(struct mm_struct *mm, struct page *,
- enum pt_level),
- unsigned long limit)
+static void __xen_pgd_walk(struct mm_struct *mm, pgd_t