Module Name: src
Committed By: matt
Date: Fri Aug 7 18:39:10 UTC 2009
Modified Files:
src/sys/arch/mips/conf: files.mips
src/sys/arch/mips/mips: cache.c
Added Files:
src/sys/arch/mips/include: cache_ls2.h
src/sys/arch/mips/mips: cache_ls2.c
Log Message:
Add loongson2 specific cache ops
To generate a diff of this commit:
cvs rdiff -u -r1.61 -r1.62 src/sys/arch/mips/conf/files.mips
cvs rdiff -u -r0 -r1.1 src/sys/arch/mips/include/cache_ls2.h
cvs rdiff -u -r1.38 -r1.39 src/sys/arch/mips/mips/cache.c
cvs rdiff -u -r0 -r1.1 src/sys/arch/mips/mips/cache_ls2.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/mips/conf/files.mips
diff -u src/sys/arch/mips/conf/files.mips:1.61 src/sys/arch/mips/conf/files.mips:1.62
--- src/sys/arch/mips/conf/files.mips:1.61 Thu Aug 6 16:13:08 2009
+++ src/sys/arch/mips/conf/files.mips Fri Aug 7 18:39:10 2009
@@ -1,4 +1,4 @@
-# $NetBSD: files.mips,v 1.61 2009/08/06 16:13:08 matt Exp $
+# $NetBSD: files.mips,v 1.62 2009/08/07 18:39:10 matt Exp $
#
defflag opt_cputype.h NOFPU
@@ -48,6 +48,7 @@
file arch/mips/mips/cache_r3k_subr.S mips1
file arch/mips/mips/cache_tx39.c mips1 & enable_mips_tx3900
file arch/mips/mips/cache_tx39_subr.S mips1 & enable_mips_tx3900
+file arch/mips/mips/cache_ls2.c mips3_loongson2
file arch/mips/mips/cache_r4k.c mips3 | mips4
file arch/mips/mips/cache_r5k.c mips3 | mips4
file arch/mips/mips/cache_r5k_subr.S mips3 | mips4
Index: src/sys/arch/mips/mips/cache.c
diff -u src/sys/arch/mips/mips/cache.c:1.38 src/sys/arch/mips/mips/cache.c:1.39
--- src/sys/arch/mips/mips/cache.c:1.38 Thu Aug 6 23:16:39 2009
+++ src/sys/arch/mips/mips/cache.c Fri Aug 7 18:39:10 2009
@@ -1,4 +1,4 @@
-/* $NetBSD: cache.c,v 1.38 2009/08/06 23:16:39 matt Exp $ */
+/* $NetBSD: cache.c,v 1.39 2009/08/07 18:39:10 matt Exp $ */
/*
* Copyright 2001, 2002 Wasabi Systems, Inc.
@@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.38 2009/08/06 23:16:39 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.39 2009/08/07 18:39:10 matt Exp $");
#include "opt_cputype.h"
#include "opt_mips_cache.h"
@@ -90,6 +90,9 @@
#ifdef ENABLE_MIPS4_CACHE_R10K
#include <mips/cache_r10k.h>
#endif
+#ifdef MIPS3_LOONGSON2
+#include <mips/cache_ls2.h>
+#endif
#endif
#if defined(MIPS32) || defined(MIPS64)
@@ -661,28 +664,28 @@
mips_cache_virtual_alias = 1;
mips_cache_ops.mco_icache_sync_all =
- r4k_icache_sync_all_32;
+ ls2_icache_sync_all;
mips_cache_ops.mco_icache_sync_range =
- r4k_icache_sync_range_32;
+ ls2_icache_sync_range;
mips_cache_ops.mco_icache_sync_range_index =
- r4k_icache_sync_range_index_32;
+ ls2_icache_sync_range_index;
mips_cache_ops.mco_pdcache_wbinv_all =
- r4k_pdcache_wbinv_all_32;
+ ls2_pdcache_wbinv_all;
mips_cache_ops.mco_pdcache_wbinv_range =
- r4k_pdcache_wbinv_range_32;
+ ls2_pdcache_wbinv_range;
mips_cache_ops.mco_pdcache_wbinv_range_index =
- r4k_pdcache_wbinv_range_index_32;
+ ls2_pdcache_wbinv_range_index;
mips_cache_ops.mco_pdcache_inv_range =
- r4k_pdcache_inv_range_32;
+ ls2_pdcache_inv_range;
mips_cache_ops.mco_pdcache_wb_range =
- r4k_pdcache_wb_range_32;
+ ls2_pdcache_wb_range;
/*
* For current version chips, [the] operating system is
* obliged to eliminate the potential for virtual aliasing.
*/
- uvmexp.ncolors = atop(mips_pdcache_size) / mips_pdcache_ways;
+ uvmexp.ncolors = mips_pdcache_ways;
break;
#endif
#endif /* MIPS3 || MIPS4 */
@@ -837,15 +840,15 @@
mips_scache_unified = 1;
mips_cache_ops.mco_sdcache_wbinv_all =
- r4k_sdcache_wbinv_all_32;
+ ls2_sdcache_wbinv_all;
mips_cache_ops.mco_sdcache_wbinv_range =
- r4k_sdcache_wbinv_range_32;
+ ls2_sdcache_wbinv_range;
mips_cache_ops.mco_sdcache_wbinv_range_index =
- r4k_sdcache_wbinv_range_index_32;
+ ls2_sdcache_wbinv_range_index;
mips_cache_ops.mco_sdcache_inv_range =
- r4k_sdcache_inv_range_32;
+ ls2_sdcache_inv_range;
mips_cache_ops.mco_sdcache_wb_range =
- r4k_sdcache_wb_range_32;
+ ls2_sdcache_wb_range;
/*
* The secondary cache is physically indexed and tagged
Added files:
Index: src/sys/arch/mips/include/cache_ls2.h
diff -u /dev/null src/sys/arch/mips/include/cache_ls2.h:1.1
--- /dev/null Fri Aug 7 18:39:10 2009
+++ src/sys/arch/mips/include/cache_ls2.h Fri Aug 7 18:39:10 2009
@@ -0,0 +1,125 @@
+/* $NetBSD: cache_ls2.h,v 1.1 2009/08/07 18:39:10 matt Exp $ */
+
+/*-
+ * Copyright (c) 2009 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <[email protected]>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MIPS_CACHE_LS2_H_
+#define _MIPS_CACHE_LS2_H_
+
+/*
+ * Cache definitions/operations for Loongson-style caches.
+ */
+#define CACHEOP_LS2_I_INDEX_INV 0	/* primary I: invalidate by index */
+#define CACHEOP_LS2_D_INDEX_WB_INV 1	/* primary D: writeback-invalidate by index */
+#define CACHEOP_LS2_S_INDEX_WB_INV 3	/* secondary: writeback-invalidate by index */
+#define CACHEOP_LS2_D_HIT_INV 17	/* primary D: invalidate on hit */
+#define CACHEOP_LS2_S_HIT_INV 19	/* secondary: invalidate on hit */
+#define CACHEOP_LS2_D_HIT_WB_INV 21	/* primary D: writeback-invalidate on hit */
+#define CACHEOP_LS2_S_HIT_WB_INV 23	/* secondary: writeback-invalidate on hit */
+
+#if !defined(_LOCORE)
+/*
+ * The way is encoded in the bottom 2 bits of VA.
+ */
+
+/* Apply "op" to 8 consecutive 32-byte lines starting at "va", in all
+ * 4 ways of each index (the way is encoded in the low two VA bits). */
+#define cache_op_ls2_8line_4way(va, op) \
+ __asm volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x00(%0); cache %1, 0x01(%0) \n\t" \
+ "cache %1, 0x02(%0); cache %1, 0x03(%0) \n\t" \
+ "cache %1, 0x20(%0); cache %1, 0x21(%0) \n\t" \
+ "cache %1, 0x22(%0); cache %1, 0x23(%0) \n\t" \
+ "cache %1, 0x40(%0); cache %1, 0x41(%0) \n\t" \
+ "cache %1, 0x42(%0); cache %1, 0x43(%0) \n\t" \
+ "cache %1, 0x60(%0); cache %1, 0x61(%0) \n\t" \
+ "cache %1, 0x62(%0); cache %1, 0x63(%0) \n\t" \
+ "cache %1, 0x80(%0); cache %1, 0x81(%0) \n\t" \
+ "cache %1, 0x82(%0); cache %1, 0x83(%0) \n\t" \
+ "cache %1, 0xa0(%0); cache %1, 0xa1(%0) \n\t" \
+ "cache %1, 0xa2(%0); cache %1, 0xa3(%0) \n\t" \
+ "cache %1, 0xc0(%0); cache %1, 0xc1(%0) \n\t" \
+ "cache %1, 0xc2(%0); cache %1, 0xc3(%0) \n\t" \
+ "cache %1, 0xe0(%0); cache %1, 0xe1(%0) \n\t" \
+ "cache %1, 0xe2(%0); cache %1, 0xe3(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory");
+
+/* Apply "op" to a single 32-byte line in all 4 ways. */
+#define cache_op_ls2_line_4way(va, op) \
+ __asm volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0(%0); cache %1, 1(%0) \n\t" \
+ "cache %1, 2(%0); cache %1, 3(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory");
+
+/* Apply "op" to 8 consecutive 32-byte lines (hit ops: way is chosen
+ * by the tag match, so no per-way addressing is needed). */
+#define cache_op_ls2_8line(va, op) \
+ __asm volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t" \
+ "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t" \
+ "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t" \
+ "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory");
+
+/* Apply "op" to a single 32-byte line. */
+#define cache_op_ls2_line(va, op) \
+ __asm volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory");
+
+void ls2_icache_sync_all(void);
+void ls2_icache_sync_range(vaddr_t, vsize_t);
+void ls2_icache_sync_range_index(vaddr_t, vsize_t);
+
+void ls2_pdcache_wbinv_all(void);
+void ls2_pdcache_wbinv_range(vaddr_t, vsize_t);
+void ls2_pdcache_wbinv_range_index(vaddr_t, vsize_t);
+
+void ls2_pdcache_inv_range(vaddr_t, vsize_t);
+void ls2_pdcache_wb_range(vaddr_t, vsize_t);
+
+void ls2_sdcache_wbinv_all(void);
+void ls2_sdcache_wbinv_range(vaddr_t, vsize_t);
+void ls2_sdcache_wbinv_range_index(vaddr_t, vsize_t);
+
+void ls2_sdcache_inv_range(vaddr_t, vsize_t);
+void ls2_sdcache_wb_range(vaddr_t, vsize_t);
+
+#endif /* !_LOCORE */
+#endif /* !_MIPS_CACHE_LS2_H_ */
Index: src/sys/arch/mips/mips/cache_ls2.c
diff -u /dev/null src/sys/arch/mips/mips/cache_ls2.c:1.1
--- /dev/null Fri Aug 7 18:39:10 2009
+++ src/sys/arch/mips/mips/cache_ls2.c Fri Aug 7 18:39:10 2009
@@ -0,0 +1,303 @@
+/* $NetBSD: cache_ls2.c,v 1.1 2009/08/07 18:39:10 matt Exp $ */
+
+/*-
+ * Copyright (c) 2009 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <[email protected]>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.1 2009/08/07 18:39:10 matt Exp $");
+
+#include <sys/param.h>
+
+#include <mips/cache.h>
+#include <mips/cache_ls2.h>
+#include <mips/locore.h>
+
+/*
+ * Cache operations for Loongson2-style caches:
+ *
+ * - 4-way set-associative, 32 bytes per line
+ * - Write-back
+ * - Primary is virtually indexed, physically tagged
+ * - Secondary is physically indexed, physically tagged
+ */
+
+#define round_line(x) (((x) + 31) & ~31)
+#define trunc_line(x) ((x) & ~31)
+
+__asm(".set mips3");
+
+/*
+ * ls2_icache_sync_range:
+ *
+ * Sync the primary icache over a virtual range: write the matching
+ * primary dcache lines back to memory (hit op), then invalidate the
+ * icache lines by index across all four ways.
+ */
+void
+ls2_icache_sync_range(vaddr_t va, vsize_t size)
+{
+ const vaddr_t eva = round_line(va + size);
+
+ va = trunc_line(va);
+
+ /* If the range covers a whole way, sync everything instead. */
+ if (va + mips_picache_way_size <= eva) {
+ ls2_icache_sync_all();
+ return;
+ }
+#if 0
+ mips_dcache_wb_range(va, (eva - va));
+#endif
+
+ /* Eight lines at a time... */
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_HIT_WB_INV);
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_I_INDEX_INV);
+ va += 8 * 32;
+ }
+
+ /* ...then the stragglers one line at a time. */
+ while (va < eva) {
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_D_HIT_WB_INV);
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_I_INDEX_INV);
+ va += 32;
+ }
+
+ /* Drain pending writes before the caller runs the new code. */
+ __asm volatile("sync");
+}
+
+/*
+ * ls2_icache_sync_range_index:
+ *
+ * Like ls2_icache_sync_range, but the given VA may not be mapped, so
+ * operate purely by cache index through an equivalent KSEG0 address.
+ */
+void
+ls2_icache_sync_range_index(vaddr_t va, vsize_t size)
+{
+ vaddr_t eva;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+
+ va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
+ eva = round_line(va + size);
+ va = trunc_line(va);
+
+ /* If the range covers (at least) a whole way, do the whole way. */
+ if (va + mips_picache_way_size <= eva) {
+ va = MIPS_PHYS_TO_KSEG0(0);
+ /*
+ * eva is an end ADDRESS, not a size: it must be offset
+ * from the KSEG0 base, otherwise eva < va and the loops
+ * below never run (silent no-op).
+ */
+ eva = va + mips_picache_way_size;
+ }
+
+#if 0
+ mips_dcache_wbinv_range_index(va, (eva - va));
+ __asm volatile("sync");
+#endif
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_I_INDEX_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_I_INDEX_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_icache_sync_all:
+ *
+ * Sync the entire primary icache; one way's worth of indices covers
+ * all four ways because the index ops hit every way.
+ */
+void
+ls2_icache_sync_all(void)
+{
+ ls2_icache_sync_range_index(0, mips_picache_way_size);
+}
+
+/*
+ * ls2_pdcache_inv_range:
+ *
+ * Invalidate (without writing back) any primary dcache lines that
+ * hit in the given virtual range.
+ */
+void
+ls2_pdcache_inv_range(vaddr_t va, vsize_t size)
+{
+ const vaddr_t eva = round_line(va + size);
+
+ va = trunc_line(va);
+
+ /* Hit ops: only lines actually caching the range are touched. */
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_pdcache_wbinv_range:
+ *
+ * Write back and invalidate any primary dcache lines that hit in
+ * the given virtual range.
+ */
+void
+ls2_pdcache_wbinv_range(vaddr_t va, vsize_t size)
+{
+ const vaddr_t eva = round_line(va + size);
+
+ va = trunc_line(va);
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_pdcache_wb_range:
+ *
+ * Write back primary dcache lines for the range.  The Loongson2
+ * cache ops offer no writeback-only hit op, so this over-delivers
+ * by invalidating as well.
+ */
+void
+ls2_pdcache_wb_range(vaddr_t va, vsize_t size)
+{
+ /*
+ * Alas, can't writeback without invalidating...
+ */
+ ls2_pdcache_wbinv_range(va, size);
+}
+
+/*
+ * ls2_pdcache_wbinv_range_index:
+ *
+ * Write back and invalidate primary dcache lines by index (the VA
+ * may not be mapped), touching all four ways per index.
+ */
+void
+ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
+{
+ vaddr_t eva;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask);
+
+ eva = round_line(va + size);
+ va = trunc_line(va);
+
+ /*
+ * If the range covers (at least) a whole way, do the whole way.
+ * NOTE: the test used to read ">" and eva lacked the "va +"
+ * offset, which made every smaller-than-a-way call a no-op
+ * (eva ended up below the KSEG0 base address in va).
+ */
+ if (va + mips_pdcache_way_size <= eva) {
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + mips_pdcache_way_size;
+ }
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_pdcache_wbinv_all:
+ *
+ * Write back and invalidate the entire primary dcache.
+ */
+void
+ls2_pdcache_wbinv_all(void)
+{
+ ls2_pdcache_wbinv_range_index(0, mips_pdcache_way_size);
+}
+
+/*
+ * Cache operations for secondary caches:
+ *
+ * - 4-way set-associative (the index ops below visit all four ways)
+ * - Write-back
+ * - Physically indexed, physically tagged
+ *
+ */
+
+/*
+ * ls2_sdcache_inv_range:
+ *
+ * Invalidate (without writing back) any secondary cache lines that
+ * hit in the given range.
+ */
+void
+ls2_sdcache_inv_range(vaddr_t va, vsize_t size)
+{
+ const vaddr_t eva = round_line(va + size);
+
+ va = trunc_line(va);
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_sdcache_wbinv_range:
+ *
+ * Write back and invalidate any secondary cache lines that hit in
+ * the given range.
+ */
+void
+ls2_sdcache_wbinv_range(vaddr_t va, vsize_t size)
+{
+ const vaddr_t eva = round_line(va + size);
+
+ va = trunc_line(va);
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_WB_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_WB_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_sdcache_wb_range:
+ *
+ * Write back secondary cache lines for the range.  There is no
+ * writeback-only hit op, so this invalidates as well.
+ */
+void
+ls2_sdcache_wb_range(vaddr_t va, vsize_t size)
+{
+ /*
+ * Alas, can't writeback without invalidating...
+ */
+ ls2_sdcache_wbinv_range(va, size);
+}
+
+/*
+ * ls2_sdcache_wbinv_range_index:
+ *
+ * Write back and invalidate secondary cache lines by index (the VA
+ * may not be mapped), touching all four ways per index.
+ */
+void
+ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
+{
+ vaddr_t eva;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & mips_sdcache_way_mask);
+
+ eva = round_line(va + size);
+ va = trunc_line(va);
+
+ /*
+ * If the range covers (at least) a whole way, do the whole way.
+ * NOTE: the test used to read ">" (inverted): small ranges
+ * flushed the entire way, and ranges larger than a way were
+ * never clamped, running index ops past the end of the way.
+ */
+ if (va + mips_sdcache_way_size <= eva) {
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + mips_sdcache_way_size;
+ }
+
+ while (va + 8 * 32 <= eva) {
+ cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
+ va += 32;
+ }
+}
+
+/*
+ * ls2_sdcache_wbinv_all:
+ *
+ * Write back and invalidate the entire secondary cache.
+ */
+void
+ls2_sdcache_wbinv_all(void)
+{
+ ls2_sdcache_wbinv_range_index(0, mips_sdcache_way_size);
+}