Re: pmap7.c cleanup

2016-08-08 Thread Daniel Bolgheroni
On Mon, Aug 08, 2016 at 09:17:39PM +0200, Mark Kettenis wrote:
> This diff should not introduce any change in behaviour.

Another round tested with a kernel build; works for me on BeagleBone
Black.

-- 
db



pmap7.c cleanup

2016-08-08 Thread Mark Kettenis
The page tables are cached now, and given the significant speedup, I
don't think we'll ever go back.  So let's ditch the code that tries to
check and patch up incorrect memory attributes.
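
For reference, the check-and-patch idiom being deleted looks like this
(both removed hunks below carry a variant of it):

	/* If the mapping was not established with the page-table
	 * cache mode, rewrite the PTE and flush the stale TLB entry. */
	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
		*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
		PTE_SYNC(ptep);
		cpu_tlb_flushD_SE(va);
	}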

Also realize that pmap_clean_page(pg, FALSE) doesn't do anything
anymore, so remove those calls and drop the 2nd argument from the
pmap_clean_page(pg, TRUE) calls.
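
The call-site change is mechanical:

	pmap_clean_page(pg, TRUE);	/* before */
	pmap_clean_page(pg);		/* after */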

Last but not least, get rid of pmap_pte_init_generic() here.  The only
useful thing it did was setting pmap_copy_page_func() and
pmap_zero_page_func().
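
A sketch of what that boils down to (the *_generic implementations are
assumed here for illustration, not taken from this diff):

	/* Assumed replacement: assign the hooks directly rather than
	 * going through pmap_pte_init_generic(). */
	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;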

This diff should not introduce any change in behaviour.

ok?


Index: pmap7.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/pmap7.c,v
retrieving revision 1.35
diff -u -p -r1.35 pmap7.c
--- pmap7.c 8 Aug 2016 14:47:52 -   1.35
+++ pmap7.c 8 Aug 2016 19:06:20 -
@@ -378,7 +378,6 @@ struct pv_entry {
 /*
  * Local prototypes
  */
-int    pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
 void   pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
pt_entry_t **);
 static boolean_t   pmap_is_current(pmap_t);
@@ -395,10 +394,9 @@ void   pmap_free_l1(pmap_t);
 struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
 struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
 void   pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
-void   pmap_l2ptp_ctor(void *);
 
 void   pmap_clearbit(struct vm_page *, u_int);
-void   pmap_clean_page(struct vm_page *, int);
+void   pmap_clean_page(struct vm_page *);
 void   pmap_page_remove(struct vm_page *);
 
 void   pmap_init_l1(struct l1_ttable *, pd_entry_t *);
@@ -626,12 +624,10 @@ uint nl1;
 void
 pmap_alloc_l1(pmap_t pm, int domain)
 {
-   struct l2_bucket *l2b;
struct l1_ttable *l1;
struct pglist plist;
struct vm_page *m;
pd_entry_t *pl1pt;
-   pt_entry_t *ptep, pte;
vaddr_t va, eva;
int error;
 
@@ -664,22 +660,6 @@ printf("%s: %d %d\n", __func__, domain, 
paddr_t pa = VM_PAGE_TO_PHYS(m);
 
pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
-   /*
-* Make sure the L1 descriptor table is mapped
-* with the cache-mode set to write-through, or
-* correctly synced.
-*/
-   l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-   ptep = &l2b->l2b_kva[l2pte_index(va)];
-   pte = *ptep;
-
-   if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
-   pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
-   *ptep = pte;
-   PTE_SYNC(ptep);
-   cpu_tlb_flushD_SE(va);
-   }
-
m = TAILQ_NEXT(m, pageq);
}
 
@@ -798,7 +778,7 @@ pmap_alloc_l2_bucket(pmap_t pm, vaddr_t 
}
return (NULL);
}
-   pmap_l2ptp_ctor(ptep);
+   PTE_SYNC_RANGE(ptep, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
pmap_extract(pmap_kernel(), (vaddr_t)ptep, &l2b->l2b_phys);
 
l2->l2_occupancy++;
@@ -894,40 +874,6 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
 }
 
 /*
- * Cache constructors for L2 descriptor tables, metadata and pmap
- * structures.
- */
-void
-pmap_l2ptp_ctor(void *v)
-{
-   struct l2_bucket *l2b;
-   pt_entry_t *ptep, pte;
-   vaddr_t va = (vaddr_t)v & ~PGOFSET;
-
-   /*
-* The mappings for these page tables were initially made using
-* pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
-* mode will not be right for page table mappings. To avoid
-* polluting the pmap_kenter_pa() code with a special case for
-* page tables, we simply fix up the cache-mode here if it's not
-* correct.
-*/
-   l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-   KDASSERT(l2b != NULL);
-   ptep = &l2b->l2b_kva[l2pte_index(va)];
-   pte = *ptep;
-
-   if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
-   *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
-   PTE_SYNC(ptep);
-   cpu_tlb_flushD_SE(va);
-   cpu_cpwait();
-   }
-
-   PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
-}
-
-/*
  * Modify pte bits for all ptes corresponding to the given physical address.
  * We use `maskbits' rather than `clearbits' because we're always passing
  * constants and the latter would require an extra inversion at run-time.
@@ -955,13 +901,6 @@ pmap_clearbit(struct vm_page *pg, u_int 
return;
 
/*
-* If we are changing a writable or modified page to
-* read-only (or worse), be sure to flush it first.
-*/
-   if (maskbits & (PVF_WRITE|PVF_MOD))
-   pmap_clean_page(pg, FALSE);
-
-   /*