Module Name:    src
Committed By:   rmind
Date:           Wed Jun  2 03:12:44 UTC 2010

Modified Files:
        src/sys/arch/mips/include [rmind-uvmplock]: types.h
        src/sys/dev [rmind-uvmplock]: mm.c mm.h

Log Message:
Add code, dev_mem_getva() and dev_mem_relva(), to deal with cache-aliasing
issues by allocating an appropriate KVA from the physical address, according
to the colour.  Used by architectures that have such a requirement.  For now,
enable only for MIPS; others will follow.  This renames the previously
invented mm_md_getva() and mm_md_relva(), since we now do this in an MI
(machine-independent) way, instead of an MD (machine-dependent) one.
Architectures just need to define __HAVE_MM_MD_CACHE_ALIASING as an indicator.

Reviewed by Matt Thomas.


To generate a diff of this commit:
cvs rdiff -u -r1.45.4.1 -r1.45.4.2 src/sys/arch/mips/include/types.h
cvs rdiff -u -r1.13.16.3 -r1.13.16.4 src/sys/dev/mm.c
cvs rdiff -u -r1.1.2.2 -r1.1.2.3 src/sys/dev/mm.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/include/types.h
diff -u src/sys/arch/mips/include/types.h:1.45.4.1 src/sys/arch/mips/include/types.h:1.45.4.2
--- src/sys/arch/mips/include/types.h:1.45.4.1	Thu Mar 18 04:36:50 2010
+++ src/sys/arch/mips/include/types.h	Wed Jun  2 03:12:44 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.45.4.1 2010/03/18 04:36:50 rmind Exp $	*/
+/*	$NetBSD: types.h,v 1.45.4.2 2010/06/02 03:12:44 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -145,5 +145,6 @@
 
 #define	__HAVE_MM_MD_DIRECT_MAPPED_PHYS
 #define	__HAVE_MM_MD_KERNACC
+#define	__HAVE_MM_MD_CACHE_ALIASING
 
 #endif	/* _MACHTYPES_H_ */

Index: src/sys/dev/mm.c
diff -u src/sys/dev/mm.c:1.13.16.3 src/sys/dev/mm.c:1.13.16.4
--- src/sys/dev/mm.c:1.13.16.3	Sun Apr 25 21:08:45 2010
+++ src/sys/dev/mm.c	Wed Jun  2 03:12:43 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: mm.c,v 1.13.16.3 2010/04/25 21:08:45 rmind Exp $	*/
+/*	$NetBSD: mm.c,v 1.13.16.4 2010/06/02 03:12:43 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2008, 2010 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.13.16.3 2010/04/25 21:08:45 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.13.16.4 2010/06/02 03:12:43 rmind Exp $");
 
 #include "opt_compat_netbsd.h"
 
@@ -90,13 +90,49 @@
 	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
 	KASSERT(pg != 0);
 	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
+	pmap_update(pmap_kernel());
 	dev_zero_page = (void *)pg;
 
-#ifndef __HAVE_MM_MD_PREFER_VA
+#ifndef __HAVE_MM_MD_CACHE_ALIASING
 	/* KVA for mappings during I/O. */
 	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
 	KASSERT(dev_mem_addr != 0);
+#else
+	dev_mem_addr = 0;
+#endif
+}
+
+
+/*
+ * dev_mem_getva: get a special virtual address.  If architecture requires,
+ * allocate VA according to PA, which avoids cache-aliasing issues.  Use a
+ * constant, general mapping address otherwise.
+ */
+static inline vaddr_t
+dev_mem_getva(paddr_t pa)
+{
+#ifdef __HAVE_MM_MD_CACHE_ALIASING
+	const vsize_t coloroff = trunc_page(pa) & ptoa(uvmexp.colormask);
+	const vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE + coloroff,
+	    ptoa(uvmexp.ncolors), UVM_KMF_VAONLY | UVM_KMF_WAITVA);
+
+	return kva + coloroff;
+#else
+	return dev_mem_addr;
+#endif
+}
+
+static inline void
+dev_mem_relva(paddr_t pa, vaddr_t va)
+{
+#ifdef __HAVE_MM_MD_CACHE_ALIASING
+	const vsize_t coloroff = trunc_page(pa) & ptoa(uvmexp.colormask);
+	const vaddr_t origva = va - coloroff;
+
+	uvm_km_free(kernel_map, origva, PAGE_SIZE + coloroff, UVM_KMF_VAONLY);
+#else
+	KASSERT(dev_mem_addr == va);
 #endif
 }
 
@@ -133,12 +169,9 @@
 	have_direct = false;
 #endif
 	if (!have_direct) {
-#ifndef __HAVE_MM_MD_PREFER_VA
-		const vaddr_t va = dev_mem_addr;
-#else
 		/* Get a special virtual address. */
-		const vaddr_t va = mm_md_getva(paddr);
-#endif
+		const vaddr_t va = dev_mem_getva(paddr);
+
 		/* Map selected KVA to physical address. */
 		mutex_enter(&dev_mem_lock);
 		pmap_kenter_pa(va, paddr, prot, 0);
@@ -148,14 +181,13 @@
 		vaddr = va + offset;
 		error = uiomove((void *)vaddr, len, uio);
 
-		/* Unmap.  Note: no need for pmap_update(). */
+		/* Unmap, flush before unlock. */
 		pmap_kremove(va, PAGE_SIZE);
+		pmap_update(pmap_kernel());
 		mutex_exit(&dev_mem_lock);
 
-#ifdef __HAVE_MM_MD_PREFER_VA
 		/* "Release" the virtual address. */
-		mm_md_relva(va);
-#endif
+		dev_mem_relva(paddr, va);
 	} else {
 		/* Direct map, just perform I/O. */
 		vaddr += offset;

Index: src/sys/dev/mm.h
diff -u src/sys/dev/mm.h:1.1.2.2 src/sys/dev/mm.h:1.1.2.3
--- src/sys/dev/mm.h:1.1.2.2	Sun Apr 25 15:27:35 2010
+++ src/sys/dev/mm.h	Wed Jun  2 03:12:43 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: mm.h,v 1.1.2.2 2010/04/25 15:27:35 rmind Exp $	*/
+/*	$NetBSD: mm.h,v 1.1.2.3 2010/06/02 03:12:43 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2008 Joerg Sonnenberger <jo...@netbsd.org>.
@@ -87,12 +87,9 @@
 bool	mm_md_direct_mapped_io(void *, paddr_t *);
 
 /*
- * Optional hooks to select and release a special virtual address,
- * in order to avoid cache aliasing issues on certain architectures.
+ * Some architectures may need to deal with cache aliasing issues.
  *
- * machine/types.h must define __HAVE_MM_MD_PREFER_VA to use this.
+ * machine/types.h must define __HAVE_MM_MD_CACHE_ALIASING to note that.
  */
-vaddr_t	mm_md_getva(paddr_t);
-void	mm_md_relva(vaddr_t);
 
 #endif /* _SYS_DEV_MM_H_ */

Reply via email to