Module Name:    src
Committed By:   matt
Date:           Wed Feb 13 23:14:35 UTC 2013
Modified Files:
        src/sys/arch/arm/arm32: pmap.c

Log Message:
Some armv7 fixes for speculative tlb loads.

To generate a diff of this commit:
cvs rdiff -u -r1.252 -r1.253 src/sys/arch/arm/arm32/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.252 src/sys/arch/arm/arm32/pmap.c:1.253
--- src/sys/arch/arm/arm32/pmap.c:1.252 Mon Feb 4 13:37:30 2013
+++ src/sys/arch/arm/arm32/pmap.c       Wed Feb 13 23:14:35 2013
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.252 2013/02/04 13:37:30 macallan Exp $ */
+/* $NetBSD: pmap.c,v 1.253 2013/02/13 23:14:35 matt Exp $ */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.252 2013/02/04 13:37:30 macallan Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.253 2013/02/13 23:14:35 matt Exp $");
 
 #ifdef PMAP_DEBUG
@@ -605,14 +605,32 @@ struct pv_entry {
  * Macro to determine if a mapping might be resident in the
  * instruction cache and/or TLB
  */
+#if ARM_MMU_V7 > 0
+/*
+ * Speculative loads by Cortex cores can cause TLB entries to be filled even if
+ * there are no explicit accesses, so there may be always be TLB entries to
+ * flush. If we used ASIDs then this would not be a problem.
+ */
+#define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC)
+#else
 #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+#endif
 #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0)
 
 /*
  * Macro to determine if a mapping might be resident in the
  * data cache and/or TLB
  */
+#if ARM_MMU_V7 > 0
+/*
+ * Speculative loads by Cortex cores can cause TLB entries to be filled even if
+ * there are no explicit accesses, so there may be always be TLB entries to
+ * flush. If we used ASIDs then this would not be a problem.
+ */
+#define PV_BEEN_REFD(f) (1)
+#else
 #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
+#endif
 
 /*
  * Local prototypes
@@ -726,7 +744,16 @@ pmap_tlb_flushID(pmap_t pm)
 
         if (pm->pm_cstate.cs_tlb_id) {
                 cpu_tlb_flushID();
+#if ARM_MMU_V7 == 0
+                /*
+                 * Speculative loads by Cortex cores can cause TLB entries to
+                 * be filled even if there are no explicit accesses, so there
+                 * may be always be TLB entries to flush. If we used ASIDs
+                 * then it would not be a problem.
+                 * This is not true for other CPUs.
+                 */
                 pm->pm_cstate.cs_tlb = 0;
+#endif
         }
 }
@@ -736,7 +763,16 @@ pmap_tlb_flushD(pmap_t pm)
 
         if (pm->pm_cstate.cs_tlb_d) {
                 cpu_tlb_flushD();
+#if ARM_MMU_V7 == 0
+                /*
+                 * Speculative loads by Cortex cores can cause TLB entries to
+                 * be filled even if there are no explicit accesses, so there
+                 * may be always be TLB entries to flush. If we used ASIDs
+                 * then it would not be a problem.
+                 * This is not true for other CPUs.
+                 */
                 pm->pm_cstate.cs_tlb_d = 0;
+#endif
         }
 }
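For readers who want to see the effect of the PV_BEEN_EXECD/PV_BEEN_REFD change
in isolation, here is a minimal, self-contained sketch (not NetBSD code).  The
PVF_REF/PVF_EXEC bit values are invented for illustration, and the _PRE_V7/_V7
names exist only in this example so both variants can be compared side by side;
in pmap.c the selection is made at compile time by the ARM_MMU_V7 conditional
shown in the diff above.

/*
 * Standalone illustration only -- the flag values below are assumptions,
 * not the real PVF_* encodings from the pmap headers.
 */
#include <stdio.h>

#define PVF_REF   0x04   /* "page has been referenced" (assumed value) */
#define PVF_EXEC  0x08   /* "page has an executable mapping" (assumed value) */

/* Pre-armv7 variants: trust the software reference bit to skip TLB flushes. */
#define PV_BEEN_EXECD_PRE_V7(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
#define PV_BEEN_REFD_PRE_V7(f)  (((f) & PVF_REF) != 0)

/*
 * armv7 variants from the diff: speculative loads may have filled the TLB
 * even though the reference bit was never set, so a data-side flush is always
 * assumed necessary and only PVF_EXEC is required for instruction mappings.
 */
#define PV_BEEN_EXECD_V7(f)     (((f) & PVF_EXEC) == PVF_EXEC)
#define PV_BEEN_REFD_V7(f)      (1)

int
main(void)
{
        /* An executable mapping whose reference bit was never set. */
        unsigned flags = PVF_EXEC;

        printf("pre-v7: flush I-TLB? %d  flush D-TLB? %d\n",
            PV_BEEN_EXECD_PRE_V7(flags), PV_BEEN_REFD_PRE_V7(flags));
        printf("armv7:  flush I-TLB? %d  flush D-TLB? %d\n",
            PV_BEEN_EXECD_V7(flags), PV_BEEN_REFD_V7(flags));
        return 0;
}

The pre-v7 macros answer 0/0 for this mapping while the armv7 ones answer 1/1:
on Cortex cores the absence of PVF_REF no longer proves the TLB is clean, which
is also why pmap_tlb_flushID()/pmap_tlb_flushD() in the diff stop clearing the
cached cs_tlb state when ARM_MMU_V7 is configured.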