Hi,

Personally, I like running with the pmap asserts without DEBUG too, and
this would allow just that: effortless switching between KDASSERT and assert.
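For example (a hypothetical local tweak, not part of the diff below),
getting the pmap asserts without DEBUG would then only take changing the
single definition in pmap.h:

	/* As in the diff: compiled in only with DEBUG. */
	#define	PMAPASSERT	KDASSERT

	/* Hypothetical local change: keep the asserts without DEBUG. */
	/* #define	PMAPASSERT	KASSERT */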

-Artturi


Index: sys/arch/arm/include/pmap.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/pmap.h,v
retrieving revision 1.40
diff -u -p -u -r1.40 pmap.h
--- sys/arch/arm/include/pmap.h 22 Mar 2016 23:35:01 -0000      1.40
+++ sys/arch/arm/include/pmap.h 28 Jul 2016 14:36:48 -0000
@@ -77,6 +77,8 @@
 #include <arm/cpufunc.h>
 #endif
 
+#define        PMAPASSERT      KDASSERT
+
 /*
  * a pmap describes a processes' 4GB virtual address space.  this
  * virtual address space can be broken up into 4096 1MB regions which
Index: sys/arch/arm/arm/pmap7.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/pmap7.c,v
retrieving revision 1.28
diff -u -p -u -r1.28 pmap7.c
--- sys/arch/arm/arm/pmap7.c    27 Jul 2016 21:12:49 -0000      1.28
+++ sys/arch/arm/arm/pmap7.c    28 Jul 2016 14:36:49 -0000
@@ -832,7 +832,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
        pt_entry_t *ptep;
        u_short l1idx;
 
-       KDASSERT(count <= l2b->l2b_occupancy);
+       PMAPASSERT(count <= l2b->l2b_occupancy);
 
        /*
         * Update the bucket's reference count according to how many
@@ -919,7 +919,7 @@ pmap_l2ptp_ctor(void *v)
         * correct.
         */
        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-       KDASSERT(l2b != NULL);
+       PMAPASSERT(l2b != NULL);
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        pte = *ptep;
 
@@ -1004,7 +1004,7 @@ pmap_clearbit(struct vm_page *pg, u_int 
                pv->pv_flags &= ~maskbits;
 
                l2b = pmap_get_l2_bucket(pm, va);
-               KDASSERT(l2b != NULL);
+               PMAPASSERT(l2b != NULL);
 
                ptep = &l2b->l2b_kva[l2pte_index(va)];
                npte = opte = *ptep;
@@ -1152,7 +1152,7 @@ pmap_page_remove(struct vm_page *pg)
                pm = pv->pv_pmap;
 
                l2b = pmap_get_l2_bucket(pm, pv->pv_va);
-               KDASSERT(l2b != NULL);
+               PMAPASSERT(l2b != NULL);
 
                ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                if (*ptep != 0) {
@@ -1253,8 +1253,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 
        NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
 
-       KDASSERT((flags & PMAP_WIRED) == 0 || (flags & PROT_MASK) != 0);
-       KDASSERT(((va | pa) & PGOFSET) == 0);
+       PMAPASSERT((flags & PMAP_WIRED) == 0 || (flags & PROT_MASK) != 0);
+       PMAPASSERT(((va | pa) & PGOFSET) == 0);
 
        /*
         * Get a pointer to the page.  Later on in this function, we
@@ -1593,7 +1593,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
            va, pa, prot));
 
        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-       KDASSERT(l2b != NULL);
+       PMAPASSERT(l2b != NULL);
 
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        opte = *ptep;
@@ -1642,7 +1642,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
                        next_bucket = eva;
 
                l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-               KDASSERT(l2b != NULL);
+               PMAPASSERT(l2b != NULL);
 
                sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
                mappings = 0;
@@ -1664,7 +1664,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
                        va += PAGE_SIZE;
                        ptep++;
                }
-               KDASSERT(mappings <= l2b->l2b_occupancy);
+               PMAPASSERT(mappings <= l2b->l2b_occupancy);
                l2b->l2b_occupancy -= mappings;
        }
        cpu_cpwait();
@@ -1688,7 +1688,7 @@ pmap_extract(pmap_t pm, vaddr_t va, padd
                /*
                 * These should only happen for pmap_kernel()
                 */
-               KDASSERT(pm == pmap_kernel());
+               PMAPASSERT(pm == pmap_kernel());
                pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
        } else {
                /*
@@ -2116,7 +2116,7 @@ pmap_unwire(pmap_t pm, vaddr_t va)
        NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
 
        l2b = pmap_get_l2_bucket(pm, va);
-       KDASSERT(l2b != NULL);
+       PMAPASSERT(l2b != NULL);
 
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        pte = *ptep;
@@ -2337,7 +2337,7 @@ pmap_grow_map(vaddr_t va, pt_entry_t cac
                *pap = pa;
 
        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
-       KDASSERT(l2b != NULL);
+       PMAPASSERT(l2b != NULL);
 
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        *ptep = L2_S_PROTO | pa | cache_mode |
@@ -2450,7 +2450,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
            printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
            pmap_curmaxkvaddr, maxkvaddr));
 
-       KDASSERT(maxkvaddr <= virtual_end);
+       PMAPASSERT(maxkvaddr <= virtual_end);
 
        /*
         * whoops!   we need to add kernel PTPs
@@ -2462,7 +2462,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
        for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
 
                l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
-               KDASSERT(l2b != NULL);
+               PMAPASSERT(l2b != NULL);
 
                /* Distribute new L1 entry to all other L1s */
                TAILQ_FOREACH(l1, &l1_list, l1_link) {
@@ -2502,7 +2502,7 @@ vector_page_setprot(int prot)
        pt_entry_t *ptep;
 
        l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
-       KDASSERT(l2b != NULL);
+       PMAPASSERT(l2b != NULL);
 
        ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
 
@@ -2519,7 +2519,7 @@ vector_page_setprot(int prot)
 void
 pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
 {
-       KDASSERT(pm->pm_l1);
+       PMAPASSERT(pm->pm_l1);
        pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
        pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
            (DOMAIN_CLIENT << (pm->pm_domain * 2));
