On 21/06/17 11:25, Jan Beulich wrote:
> Calculate entry PFN and flags just once, making the respective
> variables (and also pg) function wide. Take the opportunity and also
> make the induction variable unsigned.

Hmm -- I'm a fan of keeping the scope of variables limited to where
they're actually used, to make sure stale values don't end up being
used.  pg in particular, I think, should be kept local to the if()
statements; there's absolutely no benefit to making it function-wide.  I
don't really see much benefit in making 'flags' and 'pfn' function-wide
either; it's not easier to read, IMHO — it just might save a tiny bit of
extra code, at the expense of removing some safety.

If you want to avoid code duplication, it seems like merging the two
if() statements (of which at most one can be true) would be a better
approach.

Something like the attached (build-tested only)?

 -George


> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> 
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -195,7 +195,9 @@ p2m_next_level(struct p2m_domain *p2m, v
>      l1_pgentry_t *p2m_entry;
>      l1_pgentry_t new_entry;
>      void *next;
> -    int i;
> +    struct page_info *pg;
> +    unsigned int i, flags;
> +    unsigned long pfn;
>  
>      if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
>                                        shift, max)) )
> @@ -204,8 +206,6 @@ p2m_next_level(struct p2m_domain *p2m, v
>      /* PoD/paging: Not present doesn't imply empty. */
>      if ( !l1e_get_flags(*p2m_entry) )
>      {
> -        struct page_info *pg;
> -
>          pg = p2m_alloc_ptp(p2m, type);
>          if ( pg == NULL )
>              return -ENOMEM;
> @@ -232,21 +232,17 @@ p2m_next_level(struct p2m_domain *p2m, v
>          }
>      }
>  
> -    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));
> +    flags = l1e_get_flags(*p2m_entry);
> +    pfn = l1e_get_pfn(*p2m_entry);
> +    ASSERT(flags & (_PAGE_PRESENT|_PAGE_PSE));
>  
>      /* split 1GB pages into 2MB pages */
> -    if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & 
> _PAGE_PSE) )
> +    if ( type == PGT_l2_page_table && (flags & _PAGE_PSE) )
>      {
> -        unsigned long flags, pfn;
> -        struct page_info *pg;
> -
>          pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
>          if ( pg == NULL )
>              return -ENOMEM;
>  
> -        flags = l1e_get_flags(*p2m_entry);
> -        pfn = l1e_get_pfn(*p2m_entry);
> -
>          l1_entry = __map_domain_page(pg);
>          for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
>          {
> @@ -263,19 +259,14 @@ p2m_next_level(struct p2m_domain *p2m, v
>  
>  
>      /* split single 2MB large page into 4KB page in P2M table */
> -    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & 
> _PAGE_PSE) )
> +    if ( type == PGT_l1_page_table && (flags & _PAGE_PSE) )
>      {
> -        unsigned long flags, pfn;
> -        struct page_info *pg;
> -
>          pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
>          if ( pg == NULL )
>              return -ENOMEM;
>  
>          /* New splintered mappings inherit the flags of the old superpage, 
>           * with a little reorganisation for the _PAGE_PSE_PAT bit. */
> -        flags = l1e_get_flags(*p2m_entry);
> -        pfn = l1e_get_pfn(*p2m_entry);
>          if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
>              pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
>          else
> 
> 
> 

From 68931738e1f555c929cc77d9338043dc38f4610e Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dun...@citrix.com>
Date: Wed, 21 Jun 2017 16:08:31 +0100
Subject: [PATCH] x86/p2m-pt: simplify p2m_next_level()

[Insert description here]

Signed-off-by: George Dunlap <george.dun...@citrix.com>
---
 xen/arch/x86/mm/p2m-pt.c | 77 +++++++++++++++++++++---------------------------
 1 file changed, 33 insertions(+), 44 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 06e64b8..4e1028e 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -195,7 +195,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t new_entry;
     void *next;
-    int i;
+    unsigned int i;
 
     if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                       shift, max)) )
@@ -232,12 +232,10 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         }
     }
 
-    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));
-
     /* split 1GB pages into 2MB pages */
-    if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+    if ( l1e_get_flags(*p2m_entry) & _PAGE_PSE )
     {
-        unsigned long flags, pfn;
+        unsigned long flags, pfn, level;
         struct page_info *pg;
 
         pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
@@ -247,54 +245,45 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         flags = l1e_get_flags(*p2m_entry);
         pfn = l1e_get_pfn(*p2m_entry);
 
-        l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+        if ( type == PGT_l2_page_table )
         {
-            new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
-            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
+            l1_entry = __map_domain_page(pg);
+            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+            {
+                new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
+                p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
+                p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
+            }
+            level = 2;
         }
-        unmap_domain_page(l1_entry);
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
-        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
-    }
-
-
-    /* split single 2MB large page into 4KB page in P2M table */
-    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
-    {
-        unsigned long flags, pfn;
-        struct page_info *pg;
-
-        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
-        if ( pg == NULL )
-            return -ENOMEM;
-
-        /* New splintered mappings inherit the flags of the old superpage, 
-         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
-        flags = l1e_get_flags(*p2m_entry);
-        pfn = l1e_get_pfn(*p2m_entry);
-        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
-            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
         else
-            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-        
-        l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            new_entry = l1e_from_pfn(pfn | i, flags);
-            p2m_add_iommu_flags(&new_entry, 0, 0);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
+            /* New splintered mappings inherit the flags of the old superpage, 
+             * with a little reorganisation for the _PAGE_PSE_PAT bit. */
+            if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
+                pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
+            else
+                flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
+            
+            l1_entry = __map_domain_page(pg);
+            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+            {
+                new_entry = l1e_from_pfn(pfn | i, flags);
+                p2m_add_iommu_flags(&new_entry, 0, 0);
+                p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
+            }
+            level = 1;
         }
-        unmap_domain_page(l1_entry);
         
+        unmap_domain_page(l1_entry);
+            
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                  P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
+        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level+1);
     }
+    else
+        ASSERT(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT);
 
     next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
     if ( unmap )
-- 
2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

Reply via email to