Module Name:    src
Committed By:   matt
Date:           Thu Jul 18 22:24:53 UTC 2013

Added Files:
        src/sys/arch/m68k/include: pmap_coldfire.h pte_coldfire.h

Log Message:
PTE and pmap files for the soft TLB on ColdFire CPUs.
(Uncompiled, and probably mostly wrong.)


To generate a diff of this commit:
cvs rdiff -u -r0 -r1.1 src/sys/arch/m68k/include/pmap_coldfire.h \
    src/sys/arch/m68k/include/pte_coldfire.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Added files:

Index: src/sys/arch/m68k/include/pmap_coldfire.h
diff -u /dev/null src/sys/arch/m68k/include/pmap_coldfire.h:1.1
--- /dev/null	Thu Jul 18 22:24:53 2013
+++ src/sys/arch/m68k/include/pmap_coldfire.h	Thu Jul 18 22:24:53 2013
@@ -0,0 +1,145 @@
+/*	$NetBSD: pmap_coldfire.h,v 1.1 2013/07/18 22:24:53 matt Exp $	*/
+/*-
+ * Copyright (c) 2013 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _M68K_PMAP_COLDFIRE_H_
+#define _M68K_PMAP_COLDFIRE_H_
+
+#ifdef _LOCORE
+#error use assym.h instead
+#endif
+
+#if defined(_MODULE)
+#error this file should not be included by loadable kernel modules
+#endif
+
+#ifdef _KERNEL_OPT
+#include "opt_pmap.h"
+#endif
+
+#include <sys/cpu.h>
+#include <sys/kcore.h>
+#include <uvm/uvm_page.h>
+#ifdef __PMAP_PRIVATE
+/*
+ * XXX Leftover from the PowerPC Booke pmap this file was derived from;
+ * these need ColdFire/m68k equivalents.
+ */
+#include <powerpc/booke/cpuvar.h>
+#include <powerpc/cpuset.h>
+#endif
+
+#include <uvm/pmap/vmpagemd.h>
+
+#include <m68k/pte_coldfire.h>
+
+#define	NPTEPG		(NBPG >> 2)
+#define	NBSEG		(NBPG * NPTEPG)
+#define	SEGSHIFT	(PGSHIFT + PGSHIFT - 2)
+#define	SEGOFSET	((1 << SEGSHIFT) - 1)
+#define	PMAP_SEGTABSIZE	(1 << (32 - SEGSHIFT))
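+
+/*
+ * Worked example, assuming 8KB pages (PGSHIFT == 13, cf. MMUDR_SZ_8KB):
+ * each PTE page holds 8192/4 == 2048 PTEs, so one segment maps
+ * 8KB * 2048 == 16MB (SEGSHIFT == 24), and the segment table needs
+ * 1 << (32 - 24) == 256 entries to cover a 4GB address space.
+ */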
+
+#define	KERNEL_PID	0
+
+#define PMAP_TLB_MAX			  1
+#define	PMAP_TLB_NUM_PIDS		256
+#define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)0xfeeddead)
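+
+/*
+ * 256 PIDs matches the 8-bit ASID field (MMUTR_ID, bits 9:2) defined in
+ * <m68k/pte_coldfire.h>; ASID 0 (KERNEL_PID) is reserved for the kernel.
+ */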
+
+#define	pmap_phys_address(x)		(x)
+
+void	pmap_procwr(struct proc *, vaddr_t, size_t);
+#define	PMAP_NEED_PROCWR
+
+#ifdef __PMAP_PRIVATE
+struct vm_page *
+	pmap_md_alloc_poolpage(int flags);
+vaddr_t	pmap_md_map_poolpage(paddr_t, vsize_t);
+void	pmap_md_unmap_poolpage(vaddr_t, vsize_t);
+bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
+bool	pmap_md_io_vaddr_p(vaddr_t);
+paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
+vaddr_t	pmap_md_direct_map_paddr(paddr_t);
+void	pmap_md_init(void);
+
+bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
+
+#ifdef PMAP_MINIMALTLB
+vaddr_t	pmap_kvptefill(vaddr_t, vaddr_t, pt_entry_t);
+#endif
+#endif
+
+void	pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
+vaddr_t	pmap_bootstrap(vaddr_t, vaddr_t, phys_ram_seg_t *, size_t);
+bool	pmap_extract(struct pmap *, vaddr_t, paddr_t *);
+
+static inline paddr_t vtophys(vaddr_t);
+
+static inline paddr_t
+vtophys(vaddr_t va)
+{
+	paddr_t pa;
+
+	if (pmap_extract(pmap_kernel(), va, &pa))
+		return pa;
+	KASSERT(0);
+	return (paddr_t) -1;
+}
+
+#ifdef __PMAP_PRIVATE
+/*
+ * Virtual Cache Alias helper routines.  Not expected to be a problem
+ * on ColdFire CPUs, so these are no-ops.
+ */
+static inline bool
+pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *nptep)
+{
+	return false;
+}
+
+static inline void
+pmap_md_vca_remove(struct vm_page *pg, vaddr_t va)
+{
+
+}
+
+static inline void
+pmap_md_vca_clean(struct vm_page *pg, vaddr_t va, int op)
+{
+}
+
+static inline size_t
+pmap_md_tlb_asid_max(void)
+{
+	return PMAP_TLB_NUM_PIDS - 1;
+}
+#endif
+
+#define	POOL_VTOPHYS(va)	((paddr_t)(vaddr_t)(va))
+#define	POOL_PHYSTOV(pa)	((vaddr_t)(paddr_t)(pa))
+
+#include <uvm/pmap/pmap.h>
+
+#endif /* !_M68K_PMAP_COLDFIRE_H_ */
Index: src/sys/arch/m68k/include/pte_coldfire.h
diff -u /dev/null src/sys/arch/m68k/include/pte_coldfire.h:1.1
--- /dev/null	Thu Jul 18 22:24:53 2013
+++ src/sys/arch/m68k/include/pte_coldfire.h	Thu Jul 18 22:24:53 2013
@@ -0,0 +1,241 @@
+/* $NetBSD: pte_coldfire.h,v 1.1 2013/07/18 22:24:53 matt Exp $ */
+/*-
+ * Copyright (c) 2013 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _M68K_PTE_COLDFIRE_H_
+#define _M68K_PTE_COLDFIRE_H_
+
+#ifdef _LOCORE
+#error use assym.h instead
+#endif
+
+#ifndef __BSD_PT_ENTRY_T
+#define __BSD_PT_ENTRY_T	__uint32_t
+typedef __BSD_PT_ENTRY_T	pt_entry_t;
+#endif
+
+#define	MMUTR_VA	__BITS(31,10)	// Virtual Address
+#define	MMUTR_ID	__BITS(9,2)	// ASID
+#define	MMUTR_SG	__BIT(1)	// Shared Global
+#define	MMUTR_V		__BIT(0)	// Valid
+
+#define MMUDR_PA	__BITS(31,10)	// Physical Address
+#define	MMUDR_SZ	__BITS(9,8)	// Entry Size
+#define	MMUDR_SZ_1MB	0
+#define	MMUDR_SZ_4KB	1
+#define	MMUDR_SZ_8KB	2
+#define	MMUDR_SZ_16MB	3
+#define	MMUDR_CM	__BITS(7,6)	// Cache Mode
+#define MMUDR_CM_WT	0		// Write-Through
+#define MMUDR_CM_WB	1		// Write-Back (Copy-Back)
+#define MMUDR_CM_NC	2		// Non-cacheable
+#define MMUDR_CM_NCP	2		// Non-cacheable Precise
+#define MMUDR_CM_NCI	3		// Non-cacheable Imprecise
+#define MMUDR_SP	__BIT(5)	// Supervisor Protect
+#define MMUDR_R		__BIT(4)	// Read Access
+#define MMUDR_W		__BIT(3)	// Write Access
+#define MMUDR_X		__BIT(2)	// Execute Access
+#define MMUDR_LK	__BIT(1)	// Lock Entry
+#define MMUDR_MBZ0	__BIT(0)	// Must be zero
+
+#define	MMUAR_V		__BIT(0)	// Valid (PTE bit 0, from MMUAR[0])
+
+/*
+ * The PTE is basically the contents of MMUDR[31:2] | MMUAR[0].
+ * We overload the meaning of MMUDR_LK for indicating wired.
+ * It will be cleared before writing to the TLB.
+ */
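+
+/*
+ * Illustrative sketch only (not part of the interface added here): a
+ * TLB load would split a PTE back into the hardware registers roughly
+ * as follows, stripping the overloaded wired bit:
+ *
+ *	mmutr = (va & MMUTR_VA) | __SHIFTIN(asid, MMUTR_ID)
+ *	    | (pte & MMUAR_V);
+ *	mmudr = pte & ~(MMUDR_LK | MMUAR_V);
+ */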
+
+#ifdef _KERNEL
+
+static inline bool
+pte_cached_p(pt_entry_t pt_entry)
+{
+	/* CM values 0 (WT) and 1 (WB) are cached; 2 (NCP) and 3 (NCI) are not */
+	return __SHIFTOUT(pt_entry, MMUDR_CM) < MMUDR_CM_NC;
+}
+
+static inline bool
+pte_modified_p(pt_entry_t pt_entry)
+{
+	return (pt_entry & MMUDR_W) == MMUDR_W;
+}
+
+static inline bool
+pte_valid_p(pt_entry_t pt_entry)
+{
+	return (pt_entry & MMUAR_V) == MMUAR_V;
+}
+
+static inline bool
+pte_exec_p(pt_entry_t pt_entry)
+{
+	return (pt_entry & MMUDR_X) == MMUDR_X;
+}
+
+static inline bool
+pte_deferred_exec_p(pt_entry_t pt_entry)
+{
+	return !pte_exec_p(pt_entry);
+}
+
+static inline bool
+pte_wired_p(pt_entry_t pt_entry)
+{
+	return (pt_entry & MMUDR_LK) == MMUDR_LK;
+}
+
+static inline pt_entry_t
+pte_nv_entry(bool kernel)
+{
+	return 0;
+}
+
+static inline paddr_t
+pte_to_paddr(pt_entry_t pt_entry)
+{
+	return (paddr_t)(pt_entry & MMUDR_PA);
+}
+
+static inline pt_entry_t
+pte_ionocached_bits(void)
+{
+	/* CM_* are field values; shift into the MMUDR_CM field */
+	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
+}
+
+static inline pt_entry_t
+pte_iocached_bits(void)
+{
+	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
+}
+
+static inline pt_entry_t
+pte_nocached_bits(void)
+{
+	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
+}
+
+static inline pt_entry_t
+pte_cached_bits(void)
+{
+	return __SHIFTIN(MMUDR_CM_WB, MMUDR_CM);
+}
+
+static inline pt_entry_t
+pte_cached_change(pt_entry_t pt_entry, bool cached)
+{
+	return (pt_entry & ~MMUDR_CM)
+	    | __SHIFTIN(cached ? MMUDR_CM_WB : MMUDR_CM_NCP, MMUDR_CM);
+}
+
+static inline pt_entry_t
+pte_wire_entry(pt_entry_t pt_entry)
+{
+	return pt_entry | MMUDR_LK;
+}
+
+static inline pt_entry_t
+pte_unwire_entry(pt_entry_t pt_entry)
+{
+	return pt_entry & ~MMUDR_LK;
+}
+
+static inline pt_entry_t
+pte_prot_nowrite(pt_entry_t pt_entry)
+{
+	return pt_entry & ~MMUDR_W;
+}
+
+static inline pt_entry_t
+pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
+{
+	pt_entry &= ~MMUDR_W;
+	if ((newprot & VM_PROT_EXECUTE) == 0)
+		pt_entry &= ~MMUDR_X;
+	return pt_entry;
+}
+
+static inline pt_entry_t
+pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
+{
+	KASSERT(prot & VM_PROT_READ);
+	pt_entry_t pt_entry = MMUDR_R;
+	if (prot & VM_PROT_EXECUTE) {
+		/* Only allow exec for managed pages */
+		if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg))
+			pt_entry |= MMUDR_X;
+	}
+	if (prot & VM_PROT_WRITE) {
+		if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))
+			pt_entry |= MMUDR_W;
+	}
+	return pt_entry;
+}
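+
+/*
+ * Note: because pte_prot_bits() withholds MMUDR_W until the page is
+ * marked modified (and MMUDR_X until it is marked an exec page), the
+ * first write or execute access is expected to fault into the common
+ * uvm/pmap code, which records the event in the vm_page_md and
+ * re-enters the mapping with the additional permission bit set.
+ */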
+
+static inline pt_entry_t
+pte_flag_bits(struct vm_page_md *mdpg, int flags)
+{
+	if (__predict_false(flags & PMAP_NOCACHE)) {
+		if (__predict_true(mdpg != NULL)) {
+			return pte_nocached_bits();
+		} else {
+			return pte_ionocached_bits();
+		}
+	} else {
+		if (__predict_true(mdpg != NULL)) {
+			return pte_cached_bits();
+		} else {
+			return pte_iocached_bits();
+		}
+	}
+}
+
+static inline pt_entry_t
+pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
+	int flags, bool kernel)
+{
+	pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;
+
+	pt_entry |= pte_flag_bits(mdpg, flags);
+	pt_entry |= pte_prot_bits(mdpg, prot);
+
+	return pt_entry;
+}
+
+static inline pt_entry_t
+pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
+	int flags)
+{
+	pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;
+
+	pt_entry |= MMUDR_LK;
+	pt_entry |= pte_flag_bits(mdpg, flags);
+	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */
+
+	return pt_entry;
+}
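+
+/*
+ * For example, a (hypothetical) pmap_kenter_pa() caller gets a wired
+ * mapping (MMUDR_LK) with write permission granted up front: passing
+ * NULL to pte_prot_bits() skips the deferred modified/exec tracking
+ * that managed mappings get via pte_make_enter().
+ */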
+#endif /* _KERNEL */
+
+#endif /* _M68K_PTE_COLDFIRE_H_ */
