Module Name:	src
Committed By:	matt
Date:		Thu Feb 16 23:02:22 UTC 2012
Modified Files:
	src/sys/arch/mips/include [matt-nb5-mips64]: pmap.h
	src/sys/arch/mips/mips [matt-nb5-mips64]: pmap.c pmap_tlb.c

Log Message:
Move the ksegx tlb init code into its own function.

Fix a problem with concurrent shootdowns by tracking which cpus want a
shootdown for a pmap; if another cpu wants a shootdown, perform the
shootdown on ourselves.


To generate a diff of this commit:
cvs rdiff -u -r1.54.26.23 -r1.54.26.24 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.179.16.42 -r1.179.16.43 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r1.1.2.22 -r1.1.2.23 src/sys/arch/mips/mips/pmap_tlb.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
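Before reading the diff, it may help to see the shootdown fix in miniature.
Previously pm_shootdown_pending was a single flag, so concurrent requests
could collapse into one: whichever cpu consumed the flag first silently
discarded the other cpu's request.  The commit widens the field to a per-cpu
bitmask; pmap_update() atomically swaps the mask to zero, and if any bit
belongs to a cpu outside the current TLB's cpu set, the current cpu runs the
shootdown on itself, because that remote requester can no longer tell that
its request was consumed.  The sketch below is illustrative only, not the
committed code: the names are hypothetical, it uses C11 atomics, and it
collapses the tlb_info bookkeeping down to a plain "requester is another
cpu" test.

/* Sketch of the per-cpu shootdown-pending scheme (hypothetical names). */
#include <stdatomic.h>
#include <stdint.h>

struct toy_pmap {
	/* one bit per cpu that has requested a shootdown of this pmap */
	_Atomic uint32_t shootdown_pending;
};

/* A cpu that changed a mapping records *itself* as the requester. */
static void
toy_request_shootdown(struct toy_pmap *pm, unsigned my_cpu)
{
	atomic_fetch_or(&pm->shootdown_pending, UINT32_C(1) << my_cpu);
}

/*
 * At update time the current cpu atomically claims every pending
 * request.  A bit from another cpu means that cpu wanted this pmap
 * (which is active here) shot down; since we just cleared its bit,
 * it will never send the IPI itself, so we do the work locally.
 */
static void
toy_update(struct toy_pmap *pm, unsigned my_cpu,
    void (*process_shootdown)(struct toy_pmap *))
{
	uint32_t pending = atomic_exchange(&pm->shootdown_pending, 0);

	if (pending & ~(UINT32_C(1) << my_cpu))
		process_shootdown(pm);	/* shoot down our own TLB; no IPI */
}

In the committed code the equivalent test is pending & ~curti->ti_cpu_mask
(requests from cpus that do not share the current TLB), and the local work
is done by calling pmap_tlb_shootdown_process() directly.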
Modified files:

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.54.26.23 src/sys/arch/mips/include/pmap.h:1.54.26.24
--- src/sys/arch/mips/include/pmap.h:1.54.26.23	Thu Jan 19 08:28:48 2012
+++ src/sys/arch/mips/include/pmap.h	Thu Feb 16 23:02:21 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.54.26.23 2012/01/19 08:28:48 matt Exp $	*/
+/*	pmap.h,v 1.54.26.23 2012/01/19 08:28:48 matt Exp	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -180,7 +180,7 @@ typedef struct pmap {
 #ifdef MULTIPROCESSOR
 	volatile uint32_t pm_active;	/* pmap was active on ... */
 	volatile uint32_t pm_onproc;	/* pmap is active on ... */
-	volatile u_int pm_shootdown_pending;
+	volatile uint32_t pm_shootdown_pending;
 #endif
 	union segtab	*pm_segtab;	/* pointers to pages of PTEs */
 	u_int		pm_count;	/* pmap reference count */
@@ -254,6 +254,7 @@ extern vaddr_t mips_virtual_end;
  * Bootstrap the system enough to run with virtual memory.
  */
 void	pmap_bootstrap(void);
+void	pmap_ksegx_bootstrap(void);
 
 void	pmap_remove_all(pmap_t);
 void	pmap_set_modified(paddr_t);
@@ -262,7 +263,7 @@ void	pmap_procwr(struct proc *, vaddr_t,
 
 #ifdef MULTIPROCESSOR
 void	pmap_tlb_shootdown_process(void);
-bool	pmap_tlb_shootdown_bystanders(pmap_t pmap);
+bool	pmap_tlb_shootdown_bystanders(pmap_t pmap, uint32_t);
 void	pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
 #endif
 void	pmap_syncicache_page(struct vm_page *, uint32_t);

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.179.16.42 src/sys/arch/mips/mips/pmap.c:1.179.16.43
--- src/sys/arch/mips/mips/pmap.c:1.179.16.42	Tue Feb 14 01:51:11 2012
+++ src/sys/arch/mips/mips/pmap.c	Thu Feb 16 23:02:22 2012
@@ -443,6 +443,37 @@ pmap_unmap_ephemeral_page(struct vm_page
 #endif
 }
 
+#ifdef ENABLE_MIPS_KSEGX
+void
+pmap_ksegx_bootstrap(void)
+{
+	const vaddr_t kva_inc = 1 << ((VM_KSEGX_SHIFT - 1) & ~1);
+	const uint32_t tlb_mask = (2 * kva_inc - 1) & 0x1ffffc00;
+
+	if (mips_ksegx_tlb_slot < 0) {
+		mips_ksegx_tlb_slot = pmap_tlb0_info.ti_wired;
+		pmap_tlb0_info.ti_wired += VM_KSEGX_SIZE / (2 * kva_inc);
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
+	}
+
+	u_int tlb_slot = mips_ksegx_tlb_slot;
+	for (vaddr_t kva = 0;
+	     kva < VM_KSEGX_SIZE;
+	     kva += 2 * kva_inc, tlb_slot++) {
+		extern pt_entry_t mips_ksegx_pte;
+		struct tlbmask tlb = {
+			.tlb_hi = VM_KSEGX_ADDRESS + kva,
+			.tlb_lo0 = mips_ksegx_pte.pt_entry
+			    + mips_paddr_to_tlbpfn(kva),
+			.tlb_lo1 = mips_ksegx_pte.pt_entry
+			    + mips_paddr_to_tlbpfn(kva + kva_inc),
+			.tlb_mask = tlb_mask,
+		};
+		tlb_write_indexed(tlb_slot, &tlb);
+	}
+}
+#endif
+
 /*
  * Bootstrap the system enough to run with virtual memory.
  * firstaddr is the first unused kseg0 address (not page aligned).
@@ -460,22 +491,7 @@ pmap_bootstrap(void)
 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 #ifdef ENABLE_MIPS_KSEGX
-	const vaddr_t kva_inc = 1 << ((VM_KSEGX_SHIFT - 1) & ~1);
-	const uint32_t tlb_mask = (2 * kva_inc - 1) & 0x1ffffc00;
-	for (vaddr_t kva = 0; kva < VM_KSEGX_SIZE; kva += 2 * kva_inc) {
-		extern pt_entry_t mips_ksegx_pte;
-		struct tlbmask tlb = {
-			.tlb_hi = VM_KSEGX_ADDRESS + kva,
-			.tlb_lo0 = mips_ksegx_pte.pt_entry
-			    + mips_paddr_to_tlbpfn(kva),
-			.tlb_lo1 = mips_ksegx_pte.pt_entry
-			    + mips_paddr_to_tlbpfn(kva + kva_inc),
-			.tlb_mask = tlb_mask,
-		};
-		tlb_write_indexed(pmap_tlb0_info.ti_wired, &tlb);
-		pmap_tlb0_info.ti_wired++;
-	}
-	mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
+	pmap_ksegx_bootstrap();
 #endif
 
 	/*
@@ -918,7 +934,7 @@ pmap_update(struct pmap *pm)
 	kpreempt_disable();
 #ifdef MULTIPROCESSOR
 	u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0);
-	if (pending && pmap_tlb_shootdown_bystanders(pm))
+	if (pending && pmap_tlb_shootdown_bystanders(pm, pending))
 		PMAP_COUNT(shootdown_ipis);
 #endif
 	/*

Index: src/sys/arch/mips/mips/pmap_tlb.c
diff -u src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.22 src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.23
--- src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.22	Thu Jan 19 08:28:50 2012
+++ src/sys/arch/mips/mips/pmap_tlb.c	Thu Feb 16 23:02:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp $	*/
+/*	pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp $");
+__KERNEL_RCSID(0, "pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp");
 
 /*
  * Manages address spaces in a TLB.
@@ -221,6 +221,9 @@ pmap_pai_reset(struct pmap_tlb_info *ti,
 void
 pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
 {
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
 	evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits,
 	    EVCNT_TYPE_MISC, NULL,
 	    ti->ti_name, "asid pool reinit");
@@ -290,6 +293,9 @@ pmap_tlb_info_init(struct pmap_tlb_info
 	 */
 	ti->ti_wired = (cpu_info_store.ci_tlb_slot >= 0);
 	pmap_tlbs[ti->ti_index] = ti;
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
 #endif /* MULTIPROCESSOR */
 }
 
@@ -301,6 +307,10 @@ pmap_tlb_info_attach(struct pmap_tlb_inf
 	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
 	KASSERT(cold);
 
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
+
 	TLBINFO_LOCK(ti);
 	uint32_t cpu_mask = 1 << cpu_index(ci);
 	ti->ti_cpu_mask |= cpu_mask;
@@ -436,10 +446,16 @@ pmap_tlb_shootdown_process(void)
 	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
 #endif
 
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
+
+#if 0
 	KASSERT(cpu_intr_p());
 	KASSERTMSG(ci->ci_cpl >= IPL_SCHED,
 	    ("%s: cpl (%d) < IPL_SCHED (%d)",
 	    __func__, ci->ci_cpl, IPL_SCHED));
+#endif
 
 	TLBINFO_LOCK(ti);
 	switch (ti->ti_tlbinvop) {
@@ -526,21 +542,84 @@ pmap_tlb_shootdown_process(void)
 	TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL, \
 	    TLBINV_ALLKERNEL, TLBINV_ALL)
 
-bool
-pmap_tlb_shootdown_bystanders(pmap_t pm)
+static struct cpu_info *
+pmap_tlb_target_bystander(struct pmap_tlb_info *ti, struct pmap *pm,
+    bool kernel_p)
 {
+	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
+	TLBINFO_LOCK(ti);
+	const uint32_t onproc = (pm->pm_onproc & ti->ti_cpu_mask);
+	if (onproc == 0) {
+		if (pm->pm_active & ti->ti_cpu_mask) {
+			/*
+			 * If this pmap has an ASID assigned but it's not
+			 * currently running, nuke its ASID.  Next time the
+			 * pmap is activated, it will allocate a new ASID.
+			 * And best of all, we avoid an IPI.
+			 */
+			KASSERT(!kernel_p);
+			pmap_pai_reset(ti, pai, pm);
+			//ti->ti_evcnt_lazy_shots.ev_count++;
+		}
+		TLBINFO_UNLOCK(ti);
+		return NULL;
+	}
+	if (kernel_p) {
+		ti->ti_tlbinvop = TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
+		ti->ti_victim = NULL;
+	} else {
+		KASSERT(pai->pai_asid);
+		if (__predict_false(ti->ti_victim == pm)) {
+			KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
+			/*
+			 * We still need to invalidate this one
+			 * ASID so there's nothing to change.
+			 */
+		} else {
+			ti->ti_tlbinvop = TLBINV_USER_MAP(ti->ti_tlbinvop);
+			if (ti->ti_tlbinvop == TLBINV_ONE)
+				ti->ti_victim = pm;
+			else
+				ti->ti_victim = NULL;
+		}
+	}
+	TLBINFO_UNLOCK(ti);
 	/*
-	 * We don't need to deal our own TLB.
-	 */
-	uint32_t pm_active = pm->pm_active & ~curcpu()->ci_tlb_info->ti_cpu_mask;
+	 * Return a pointer to the cpu_info of one of the tlb_info's cpus
+	 */
+	const u_int j = ffs(onproc) - 1;
+	return cpu_lookup(j);
+}
+
+bool
+pmap_tlb_shootdown_bystanders(pmap_t pm, uint32_t pending)
+{
+	struct cpu_info * const ci = curcpu();
+	struct pmap_tlb_info * const curti = ci->ci_tlb_info;
+	uint32_t pm_active = pm->pm_active & ~curti->ti_cpu_mask;
 	const bool kernel_p = (pm == pmap_kernel());
 	bool ipi_sent = false;
 
+	KDASSERT(curti->ti_name[0] == 't');
+	KDASSERT(curti->ti_name[1] == 'l');
+	KDASSERT(curti->ti_name[2] == 'b');
+
+	if (__predict_false(pending & ~curti->ti_cpu_mask) != 0) {
+		/*
+		 * Now if another cpu (not sharing this tlb_info) wants a
+		 * shootdown, then they must mean us since this pmap is
+		 * obviously active.  But since we cleared their bit, they
+		 * won't know they need to do it.  So we do it ourselves
+		 * and save them from sending an IPI.
+		 */
+		if (pmap_tlb_target_bystander(curti, pm, kernel_p) != NULL)
+			pmap_tlb_shootdown_process();
+	}
+
 	/*
 	 * If pm_active gets more bits set, then it's after all our changes
 	 * have been made so they will already be cognizant of them.
 	 */
-
 	for (size_t i = 0; pm_active != 0; i++) {
 		KASSERT(i < pmap_ntlbs);
 		struct pmap_tlb_info * const ti = pmap_tlbs[i];
@@ -548,35 +627,13 @@ pmap_tlb_shootdown_bystanders(pmap_t pm)
 		/*
 		 * Skip this TLB if there are no active mappings for it.
 		 */
-		if ((pm_active & ti->ti_cpu_mask) == 0)
+		if ((pm_active & ti->ti_cpu_mask) == 0) {
 			continue;
-		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
+		}
 		pm_active &= ~ti->ti_cpu_mask;
-		TLBINFO_LOCK(ti);
-		const uint32_t onproc = (pm->pm_onproc & ti->ti_cpu_mask);
-		if (onproc != 0) {
-			if (kernel_p) {
-				ti->ti_tlbinvop =
-				    TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
-				ti->ti_victim = NULL;
-			} else {
-				KASSERT(pai->pai_asid);
-				if (__predict_false(ti->ti_victim == pm)) {
-					KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
-					/*
-					 * We still need to invalidate this one
-					 * ASID so there's nothing to change.
-					 */
-				} else {
-					ti->ti_tlbinvop =
-					    TLBINV_USER_MAP(ti->ti_tlbinvop);
-					if (ti->ti_tlbinvop == TLBINV_ONE)
-						ti->ti_victim = pm;
-					else
-						ti->ti_victim = NULL;
-				}
-			}
-			TLBINFO_UNLOCK(ti);
+		struct cpu_info * const ipi_ci =
+		    pmap_tlb_target_bystander(ti, pm, kernel_p);
+		if (ipi_ci != NULL) {
 			/*
 			 * Now we can send out the shootdown IPIs to a CPU
 			 * that shares this TLB and is currently using this
@@ -586,24 +643,9 @@ pmap_tlb_shootdown_bystanders(pmap_t pm)
 			 * change now that we have released the lock but we
 			 * can tolerate spurious shootdowns.
 			 */
-			KASSERT(onproc != 0);
-			u_int j = ffs(onproc) - 1;
-			cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
+			cpu_send_ipi(ipi_ci, IPI_SHOOTDOWN);
 			ipi_sent = true;
-			continue;
-		}
-		if (pm->pm_active & ti->ti_cpu_mask) {
-			/*
-			 * If this pmap has an ASID assigned but it's not
-			 * currently running, nuke its ASID.  Next time the
-			 * pmap is activated, it will allocate a new ASID.
-			 * And best of all, we avoid an IPI.
-			 */
-			KASSERT(!kernel_p);
-			pmap_pai_reset(ti, pai, pm);
-			//ti->ti_evcnt_lazy_shots.ev_count++;
 		}
-		TLBINFO_UNLOCK(ti);
 	}
 
 	return ipi_sent;
@@ -617,6 +659,10 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 	int rv = -1;
 
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
+
 	KASSERT(kpreempt_disabled());
 
 	TLBINFO_LOCK(ti);
@@ -627,7 +673,9 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t
 		pmap_tlb_asid_check();
 	}
 #ifdef MULTIPROCESSOR
-	atomic_or_uint(&pm->pm_shootdown_pending, need_ipi);
+	if (need_ipi && (pm->pm_active & ~ti->ti_cpu_mask) != 0) {
+		atomic_or_uint(&pm->pm_shootdown_pending, 1 << cpu_number());
+	}
 #endif
 	TLBINFO_UNLOCK(ti);
 
@@ -640,6 +688,10 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd
 	struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
+
 	KASSERT(kpreempt_disabled());
 
 	TLBINFO_LOCK(ti);
@@ -650,7 +702,9 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd
 		pmap_tlb_asid_check();
 	}
 #ifdef MULTIPROCESSOR
-	(void) atomic_swap_uint(&pm->pm_shootdown_pending, 1);
+	if ((pm->pm_active & ~ti->ti_cpu_mask) != 0) {
+		atomic_or_uint(&pm->pm_shootdown_pending, 1 << cpu_number());
+	}
 #endif
 	TLBINFO_UNLOCK(ti);
 }
@@ -737,6 +791,10 @@ pmap_tlb_asid_acquire(pmap_t pm, struct
 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
+
 	KASSERT(kpreempt_disabled());
 
 	/*
@@ -831,6 +889,10 @@ pmap_tlb_asid_release_all(struct pmap *p
 	for (u_int i = 0; pm->pm_active != 0; i++) {
 		KASSERT(i < pmap_ntlbs);
 		struct pmap_tlb_info * const ti = pmap_tlbs[i];
+		KDASSERT(ti->ti_name[0] == 't');
+		KDASSERT(ti->ti_name[1] == 'l');
+		KDASSERT(ti->ti_name[2] == 'b');
+
 		if (pm->pm_active & ti->ti_cpu_mask) {
			struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 			TLBINFO_LOCK(ti);
@@ -846,6 +908,9 @@ pmap_tlb_asid_release_all(struct pmap *p
 	 */
 	struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
+	KDASSERT(ti->ti_name[0] == 't');
+	KDASSERT(ti->ti_name[1] == 'l');
+	KDASSERT(ti->ti_name[2] == 'b');
 	TLBINFO_LOCK(ti);
 	if (pai->pai_asid) {
 		pmap_pai_reset(ti, pai, pm);
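As a closing note, the arithmetic in pmap_ksegx_bootstrap() can be checked
in isolation.  A MIPS TLB entry maps an even/odd pair of virtual pages
through EntryLo0/EntryLo1, so each wired slot spans 2 * kva_inc bytes, and
MIPS page sizes step by factors of four (4K, 16K, 64K, ...), which is why
the per-side shift is rounded down to an even value.  The standalone program
below is a sketch that assumes VM_KSEGX_SHIFT is 24 (a hypothetical 16MB
KSEGX window); the real value comes from the port's configuration, and the
0x1ffffc00 constant is taken from the diff above.

/* Sketch only: vm_ksegx_shift = 24 is an assumed, illustrative value. */
#include <stdio.h>

int
main(void)
{
	const unsigned vm_ksegx_shift = 24;	/* assumed 16MB KSEGX window */
	const unsigned vm_ksegx_size = 1u << vm_ksegx_shift;

	/*
	 * Round the per-side page shift down to an even value (MIPS page
	 * sizes come in powers of four); each wired TLB entry then maps
	 * an even/odd pair of such pages via EntryLo0/EntryLo1.
	 */
	const unsigned kva_inc = 1u << ((vm_ksegx_shift - 1) & ~1u);
	const unsigned tlb_mask = (2 * kva_inc - 1) & 0x1ffffc00;
	const unsigned wired_slots = vm_ksegx_size / (2 * kva_inc);

	printf("page size per EntryLo side: %#x\n", kva_inc);
	printf("PageMask value:             %#x\n", tlb_mask);
	printf("wired TLB slots needed:     %u\n", wired_slots);
	return 0;
}

With these assumptions each EntryLo side maps 4MB (0x400000), the PageMask
value comes out to 0x7ffc00, and the loop programs 16MB / 8MB = 2 wired
slots, which is exactly what the ti_wired accounting in the new function
reserves.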