Hi,
This replaces the hand-inlined check with a call to pmap_is_current(); using continue instead of break feels safer for the (pv->pv_flags & PVF_NC) case,
and it fixes a possible (harmless, I guess) bug with the flags accumulation in the loop.
To anyone reviewing, I suggest taking a peek at pmap.c as well, since it shows, e.g., where the "flags |= ..->pv_flags" idiom was inherited from.
-Artturi
diff --git a/sys/arch/arm/arm/pmap7.c b/sys/arch/arm/arm/pmap7.c
index eea49fe..59d4b90 100644
--- a/sys/arch/arm/arm/pmap7.c
+++ b/sys/arch/arm/arm/pmap7.c
@@ -1068,36 +1068,26 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
void
pmap_clean_page(struct vm_page *pg, int isync)
{
- pmap_t pm;
struct pv_entry *pv;
boolean_t wb = FALSE;
- uint flags = 0;
/*
* To save time, we are only walking the pv list if an I$ invalidation
* is required. Otherwise all we need is to map the page and writeback.
*/
if (isync) {
- if (curproc)
- pm = curproc->p_vmspace->vm_map.pmap;
- else
- pm = pmap_kernel();
-
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
- /* inline !pmap_is_current(pv->pv_pmap) */
- if (pv->pv_pmap != pmap_kernel() && pv->pv_pmap != pm)
+ if (!pmap_is_current(pv->pv_pmap))
continue;
- flags |= pv->pv_flags;
-
/*
* The page is mapped non-cacheable in
* this map. No need to flush the cache.
*/
if (pv->pv_flags & PVF_NC) /* XXX ought to be pg attr */
- break;
+ continue;
- if (PV_BEEN_EXECD(flags))
+ if (PV_BEEN_EXECD(pv->pv_flags))
cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
/*