Module Name: src
Committed By: skrll
Date: Mon Jun 12 19:04:14 UTC 2023
Modified Files:
src/sys/arch/riscv/conf: files.riscv
src/sys/arch/riscv/fdt: cpu_fdt.c intc_fdt.c riscv_platform.c
src/sys/arch/riscv/include: cpu.h db_machdep.h intr.h machdep.h pmap.h
src/sys/arch/riscv/riscv: clock_machdep.c cpu.c cpu_subr.c
db_interface.c db_machdep.c genassym.cf interrupt.c locore.S
pmap_machdep.c riscv_machdep.c spl.S
Added Files:
src/sys/arch/riscv/fdt: riscv_fdtvar.h
src/sys/arch/riscv/riscv: ipifuncs.c riscv_tlb.c
Log Message:
risc-v: MULTIPROCESSOR support
Add MULTIPROCESSOR support for RISC-V, but leave disabled for the moment
as it's not 100% stable.
Some other improvements to spl and cpu identification / reporting.
To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 src/sys/arch/riscv/conf/files.riscv
cvs rdiff -u -r1.1 -r1.2 src/sys/arch/riscv/fdt/cpu_fdt.c \
src/sys/arch/riscv/fdt/intc_fdt.c src/sys/arch/riscv/fdt/riscv_platform.c
cvs rdiff -u -r0 -r1.1 src/sys/arch/riscv/fdt/riscv_fdtvar.h
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/riscv/include/cpu.h
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/riscv/include/db_machdep.h
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/riscv/include/intr.h
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/riscv/include/machdep.h
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/riscv/include/pmap.h
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/riscv/riscv/clock_machdep.c
cvs rdiff -u -r1.1 -r1.2 src/sys/arch/riscv/riscv/cpu.c \
src/sys/arch/riscv/riscv/interrupt.c
cvs rdiff -u -r1.2 -r1.3 src/sys/arch/riscv/riscv/cpu_subr.c \
src/sys/arch/riscv/riscv/db_interface.c
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/riscv/riscv/db_machdep.c
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/riscv/riscv/genassym.cf
cvs rdiff -u -r0 -r1.1 src/sys/arch/riscv/riscv/ipifuncs.c \
src/sys/arch/riscv/riscv/riscv_tlb.c
cvs rdiff -u -r1.41 -r1.42 src/sys/arch/riscv/riscv/locore.S
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/riscv/riscv/pmap_machdep.c
cvs rdiff -u -r1.28 -r1.29 src/sys/arch/riscv/riscv/riscv_machdep.c
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/riscv/riscv/spl.S
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/riscv/conf/files.riscv
diff -u src/sys/arch/riscv/conf/files.riscv:1.12 src/sys/arch/riscv/conf/files.riscv:1.13
--- src/sys/arch/riscv/conf/files.riscv:1.12 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/conf/files.riscv Mon Jun 12 19:04:13 2023
@@ -1,4 +1,4 @@
-# $NetBSD: files.riscv,v 1.12 2023/05/07 12:41:48 skrll Exp $
+# $NetBSD: files.riscv,v 1.13 2023/06/12 19:04:13 skrll Exp $
#
maxpartitions 16
@@ -43,6 +43,7 @@ file arch/riscv/riscv/kobj_machdep.c mo
file arch/riscv/riscv/pmap_machdep.c
file arch/riscv/riscv/process_machdep.c
file arch/riscv/riscv/procfs_machdep.c procfs
+file arch/riscv/riscv/riscv_tlb.c
file arch/riscv/riscv/riscv_generic_dma.c
file arch/riscv/riscv/riscv_machdep.c
file arch/riscv/riscv/sbi.c # SBI
@@ -63,6 +64,7 @@ file kern/subr_disk_mbr.c disk
file uvm/pmap/pmap.c
file uvm/pmap/pmap_devmap.c
file uvm/pmap/pmap_segtab.c
+file uvm/pmap/pmap_synci.c
file uvm/pmap/pmap_tlb.c
device plic
Index: src/sys/arch/riscv/fdt/cpu_fdt.c
diff -u src/sys/arch/riscv/fdt/cpu_fdt.c:1.1 src/sys/arch/riscv/fdt/cpu_fdt.c:1.2
--- src/sys/arch/riscv/fdt/cpu_fdt.c:1.1 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/fdt/cpu_fdt.c Mon Jun 12 19:04:13 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_fdt.c,v 1.1 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: cpu_fdt.c,v 1.2 2023/06/12 19:04:13 skrll Exp $ */
/*-
* Copyright (c) 2017 Jared McNeill <[email protected]>
@@ -29,13 +29,167 @@
#include "opt_multiprocessor.h"
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu_fdt.c,v 1.1 2023/05/07 12:41:48 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu_fdt.c,v 1.2 2023/06/12 19:04:13 skrll Exp $");
#include <sys/param.h>
+#include <sys/cpu.h>
#include <dev/fdt/fdtvar.h>
+#include <riscv/cpufunc.h>
#include <riscv/cpuvar.h>
+#include <riscv/machdep.h>
+#include <riscv/sbi.h>
+
+#include <riscv/fdt/riscv_fdtvar.h>
+
+
+#ifdef MULTIPROCESSOR
+static bool
+riscv_fdt_cpu_okay(const int child)
+{
+ const char *s;
+
+ s = fdtbus_get_string(child, "device_type");
+ if (!s || strcmp(s, "cpu") != 0)
+ return false;
+
+ s = fdtbus_get_string(child, "status");
+ if (s) {
+ if (strcmp(s, "okay") == 0)
+ return true;
+ if (strcmp(s, "disabled") == 0)
+ return false;
+ return false;
+ } else {
+ return true;
+ }
+}
+#endif /* MULTIPROCESSOR */
+
+void
+riscv_fdt_cpu_bootstrap(void)
+{
+#ifdef MULTIPROCESSOR
+
+ const int cpus = OF_finddevice("/cpus");
+ if (cpus == -1) {
+ aprint_error("%s: no /cpus node found\n", __func__);
+ riscv_cpu_max = 1;
+ return;
+ }
+
+ /* Count harts and add hart IDs to to cpu_hartid array */
+ size_t cpuindex = 1;
+ for (int child = OF_child(cpus); child; child = OF_peer(child)) {
+ if (!riscv_fdt_cpu_okay(child))
+ continue;
+
+ riscv_cpu_max++;
+
+ uint64_t reg;
+ if (fdtbus_get_reg64(child, 0, &reg, NULL) != 0)
+ continue;
+
+ const cpuid_t hartid = reg;
+
+ struct sbiret sbiret = sbi_hart_get_status(hartid);
+ switch (sbiret.error) {
+ case SBI_ERR_INVALID_PARAM:
+ aprint_error("Unknown hart id %lx", hartid);
+ continue;
+ case SBI_SUCCESS:
+ break;
+ default:
+ aprint_error("Unexpected error (%ld) from get_status",
+ sbiret.error);
+ }
+
+ /* Assume the BP is the only one started. */
+ if (sbiret.value == SBI_HART_STARTED) {
+ if (cpu_hartid[0] != -1) {
+ panic("more than 1 hart started");
+ }
+ cpu_hartid[0] = hartid;
+ continue;
+ }
+
+ KASSERT(cpuindex < MAXCPUS);
+ cpu_hartid[cpuindex] = hartid;
+ cpu_dcache_wb_range((vaddr_t)&cpu_hartid[cpuindex],
+ sizeof(cpu_hartid[cpuindex]));
+
+ cpuindex++;
+ }
+#endif
+}
+int
+riscv_fdt_cpu_mpstart(void)
+{
+ int ret = 0;
+#ifdef MULTIPROCESSOR
+ const int cpus = OF_finddevice("/cpus");
+ if (cpus == -1) {
+ aprint_error("%s: no /cpus node found\n", __func__);
+ return 0;
+ }
+
+ // riscv_fdt_cpu_bootstrap put the boot hart id in cpu_hartid[0]
+ const cpuid_t bp_hartid = cpu_hartid[0];
+
+ /* BootAPs */
+ size_t cpuindex = 1;
+ for (int child = OF_child(cpus); child; child = OF_peer(child)) {
+ if (!riscv_fdt_cpu_okay(child))
+ continue;
+
+ uint64_t reg;
+ if (fdtbus_get_reg64(child, 0, &reg, NULL) != 0)
+ continue;
+
+ cpuid_t hartid = reg;
+
+ if (hartid == bp_hartid)
+ continue; /* BP already started */
+
+ const paddr_t entry = KERN_VTOPHYS(cpu_mpstart);
+ struct sbiret sbiret = sbi_hart_start(hartid, entry, 0);
+ switch (sbiret.error) {
+ case SBI_SUCCESS:
+ break;
+ case SBI_ERR_INVALID_ADDRESS:
+ break;
+ case SBI_ERR_INVALID_PARAM:
+ break;
+ case SBI_ERR_ALREADY_AVAILABLE:
+ break;
+ case SBI_ERR_FAILED:
+ break;
+ default:
+ aprint_error("%s: failed to enable CPU %#lx\n",
+ __func__, hartid);
+ }
+
+ size_t i;
+ /* Wait for AP to start */
+ for (i = 0x10000000; i > 0; i--) {
+ if (cpu_hatched_p(cpuindex))
+ break;
+ }
+
+ if (i == 0) {
+ ret++;
+ aprint_error("cpu%zu: WARNING: AP failed to start\n",
+ cpuindex);
+ }
+
+ cpuindex++;
+ }
+#else
+ aprint_normal("%s: kernel compiled without MULTIPROCESSOR\n", __func__);
+#endif /* MULTIPROCESSOR */
+ return ret;
+}
static int
cpu_fdt_match(device_t parent, cfdata_t cf, void *aux)
Index: src/sys/arch/riscv/fdt/intc_fdt.c
diff -u src/sys/arch/riscv/fdt/intc_fdt.c:1.1 src/sys/arch/riscv/fdt/intc_fdt.c:1.2
--- src/sys/arch/riscv/fdt/intc_fdt.c:1.1 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/fdt/intc_fdt.c Mon Jun 12 19:04:13 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: intc_fdt.c,v 1.1 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: intc_fdt.c,v 1.2 2023/06/12 19:04:13 skrll Exp $ */
/*-
* Copyright (c) 2023 The NetBSD Foundation, Inc.
@@ -29,8 +29,10 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_multiprocessor.h"
+
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intc_fdt.c,v 1.1 2023/05/07 12:41:48 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intc_fdt.c,v 1.2 2023/06/12 19:04:13 skrll Exp $");
#include <sys/param.h>
@@ -237,42 +239,59 @@ static void
intc_intr_handler(struct trapframe *tf, register_t epc, register_t status,
register_t cause)
{
+ const int ppl = splhigh();
struct cpu_info * const ci = curcpu();
- const int source = CAUSE_CODE(cause);
+ unsigned long pending;
+ int ipl;
KASSERT(CAUSE_INTERRUPT_P(cause));
struct intc_fdt_softc * const sc = intc_sc;
ci->ci_intr_depth++;
- struct intc_irq *irq = sc->sc_irq[source];
- KASSERTMSG(irq != NULL, "source %d\n", source);
- if (irq) {
- struct intc_irqhandler *iih;
-
- bool mpsafe = (irq->intr_istflags & IST_MPSAFE) != 0;
- struct clockframe cf = {
- .cf_epc = epc,
- .cf_status = status,
- .cf_intr_depth = ci->ci_intr_depth
- };
-
- if (!mpsafe) {
- KERNEL_LOCK(1, NULL);
- }
-
- TAILQ_FOREACH(iih, &irq->intr_handlers, ih_next) {
- int handled =
- iih->ih_fn(iih->ih_arg ? iih->ih_arg : &cf);
- if (handled)
- break;
- }
+ ci->ci_data.cpu_nintr++;
- if (!mpsafe) {
- KERNEL_UNLOCK_ONE(NULL);
+ while (ppl < (ipl = splintr(&pending))) {
+ if (pending == 0)
+ continue;
+
+ splx(ipl);
+
+ int source = ffs(pending) - 1;
+ struct intc_irq *irq = sc->sc_irq[source];
+ KASSERTMSG(irq != NULL, "source %d\n", source);
+
+ if (irq) {
+ struct intc_irqhandler *iih;
+
+ bool mpsafe =
+ source != IRQ_SUPERVISOR_EXTERNAL ||
+ (irq->intr_istflags & IST_MPSAFE) != 0;
+ struct clockframe cf = {
+ .cf_epc = epc,
+ .cf_status = status,
+ .cf_intr_depth = ci->ci_intr_depth
+ };
+
+ if (!mpsafe) {
+ KERNEL_LOCK(1, NULL);
+ }
+
+ TAILQ_FOREACH(iih, &irq->intr_handlers, ih_next) {
+ int handled =
+ iih->ih_fn(iih->ih_arg ? iih->ih_arg : &cf);
+ if (handled)
+ break;
+ }
+
+ if (!mpsafe) {
+ KERNEL_UNLOCK_ONE(NULL);
+ }
}
+ splhigh();
}
ci->ci_intr_depth--;
+ splx(ppl);
}
Index: src/sys/arch/riscv/fdt/riscv_platform.c
diff -u src/sys/arch/riscv/fdt/riscv_platform.c:1.1 src/sys/arch/riscv/fdt/riscv_platform.c:1.2
--- src/sys/arch/riscv/fdt/riscv_platform.c:1.1 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/fdt/riscv_platform.c Mon Jun 12 19:04:13 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: riscv_platform.c,v 1.1 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: riscv_platform.c,v 1.2 2023/06/12 19:04:13 skrll Exp $ */
/*-
* Copyright (c) 2023 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: riscv_platform.c,v 1.1 2023/05/07 12:41:48 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: riscv_platform.c,v 1.2 2023/06/12 19:04:13 skrll Exp $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -47,6 +47,7 @@ __KERNEL_RCSID(0, "$NetBSD: riscv_platfo
#include <uvm/uvm_extern.h>
#include <uvm/pmap/pmap_devmap.h>
+#include <riscv/fdt/riscv_fdtvar.h>
static const struct pmap_devmap *
riscv_platform_devmap(void)
@@ -74,12 +75,6 @@ riscv_platform_devmap(void)
}
-static void
-riscv_platform_bootstrap(void)
-{
-}
-
-
static u_int
riscv_platform_uart_freq(void)
{
@@ -88,8 +83,9 @@ riscv_platform_uart_freq(void)
static const struct fdt_platform riscv_platform = {
.fp_devmap = riscv_platform_devmap,
- .fp_bootstrap = riscv_platform_bootstrap,
+ .fp_bootstrap = riscv_fdt_cpu_bootstrap,
.fp_uart_freq = riscv_platform_uart_freq,
+ .fp_mpstart = riscv_fdt_cpu_mpstart,
};
FDT_PLATFORM(rv, FDT_PLATFORM_DEFAULT, &riscv_platform);
Index: src/sys/arch/riscv/include/cpu.h
diff -u src/sys/arch/riscv/include/cpu.h:1.11 src/sys/arch/riscv/include/cpu.h:1.12
--- src/sys/arch/riscv/include/cpu.h:1.11 Thu May 25 06:17:18 2023
+++ src/sys/arch/riscv/include/cpu.h Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.11 2023/05/25 06:17:18 skrll Exp $ */
+/* $NetBSD: cpu.h,v 1.12 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -63,14 +63,15 @@ struct cpu_info {
struct evcnt ci_ev_timer;
struct evcnt ci_ev_timer_missed;
+ u_long ci_cpu_freq; /* CPU frequency */
int ci_mtx_oldspl;
int ci_mtx_count;
-
- int ci_want_resched;
int ci_cpl;
- u_int ci_softints;
volatile u_int ci_intr_depth;
+ int ci_want_resched __aligned(COHERENCY_UNIT);
+ u_int ci_softints;
+
tlb_asid_t ci_pmap_asid_cur;
union pmap_segtab *ci_pmap_user_segtab;
@@ -81,6 +82,33 @@ struct cpu_info {
struct evcnt ci_ev_fpu_saves;
struct evcnt ci_ev_fpu_loads;
struct evcnt ci_ev_fpu_reenables;
+
+ struct pmap_tlb_info *ci_tlb_info;
+
+#ifdef MULTIPROCESSOR
+
+ volatile u_long ci_flags;
+ volatile u_long ci_request_ipis;
+ /* bitmask of IPIs requested */
+ u_long ci_active_ipis; /* bitmask of IPIs being serviced */
+
+ struct evcnt ci_evcnt_all_ipis; /* aggregated IPI counter */
+ struct evcnt ci_evcnt_per_ipi[NIPIS]; /* individual IPI counters */
+ struct evcnt ci_evcnt_synci_onproc_rqst;
+ struct evcnt ci_evcnt_synci_deferred_rqst;
+ struct evcnt ci_evcnt_synci_ipi_rqst;
+
+#define CPUF_PRIMARY __BIT(0) /* CPU is primary CPU */
+#define CPUF_PRESENT __BIT(1) /* CPU is present */
+#define CPUF_RUNNING __BIT(2) /* CPU is running */
+#define CPUF_PAUSED __BIT(3) /* CPU is paused */
+#define CPUF_USERPMAP __BIT(4) /* CPU has a user pmap activated */
+ kcpuset_t *ci_shootdowncpus;
+ kcpuset_t *ci_multicastcpus;
+ kcpuset_t *ci_watchcpus;
+ kcpuset_t *ci_ddbcpus;
+#endif
+
#if defined(GPROF) && defined(MULTIPROCESSOR)
struct gmonparam *ci_gmon; /* MI per-cpu GPROF */
#endif
@@ -90,9 +118,62 @@ struct cpu_info {
#ifdef _KERNEL
+extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];
-// This is also in <sys/lwp.h>
+
+#ifdef MULTIPROCESSOR
+extern u_int riscv_cpu_max;
+extern cpuid_t cpu_hartid[];
+
+void cpu_hatch(struct cpu_info *);
+
+void cpu_init_secondary_processor(int);
+void cpu_boot_secondary_processors(void);
+void cpu_mpstart(void);
+bool cpu_hatched_p(u_int);
+
+void cpu_clr_mbox(int);
+void cpu_set_hatched(int);
+
+
+void cpu_halt(void);
+void cpu_halt_others(void);
+bool cpu_is_paused(cpuid_t);
+void cpu_pause(void);
+void cpu_pause_others(void);
+void cpu_resume(cpuid_t);
+void cpu_resume_others(void);
+void cpu_debug_dump(void);
+
+extern kcpuset_t *cpus_running;
+extern kcpuset_t *cpus_hatched;
+extern kcpuset_t *cpus_paused;
+extern kcpuset_t *cpus_resumed;
+extern kcpuset_t *cpus_halted;
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+
+/*
+ * Send an inter-processor interrupt to each other CPU (excludes curcpu())
+ */
+void cpu_broadcast_ipi(int);
+
+/*
+ * Send an inter-processor interrupt to CPUs in kcpuset (excludes curcpu())
+ */
+void cpu_multicast_ipi(const kcpuset_t *, int);
+
+/*
+ * Send an inter-processor interrupt to another CPU.
+ */
+int cpu_send_ipi(struct cpu_info *, int);
+
+#endif
+
struct lwp;
static inline struct cpu_info *lwp_getcpu(struct lwp *);
@@ -118,9 +199,14 @@ void cpu_boot_secondary_processors(void)
#define CPU_INFO_ITERATOR cpuid_t
#ifdef MULTIPROCESSOR
-#define CPU_INFO_FOREACH(cii, ci) \
- (cii) = 0; ((ci) = cpu_infos[cii]) != NULL; (cii)++
+#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY)
+#define CPU_INFO_FOREACH(cii, ci) \
+ cii = 0, ci = &cpu_info_store[0]; \
+ ci != NULL; \
+ cii++, ncpu ? (ci = cpu_infos[cii]) \
+ : (ci = NULL)
#else
+#define CPU_IS_PRIMARY(ci) true
#define CPU_INFO_FOREACH(cii, ci) \
(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif
Index: src/sys/arch/riscv/include/db_machdep.h
diff -u src/sys/arch/riscv/include/db_machdep.h:1.7 src/sys/arch/riscv/include/db_machdep.h:1.8
--- src/sys/arch/riscv/include/db_machdep.h:1.7 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/include/db_machdep.h Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: db_machdep.h,v 1.7 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: db_machdep.h,v 1.8 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -43,7 +43,8 @@ typedef long db_expr_t; /* expression -
typedef struct trapframe db_regs_t;
-extern const uint32_t cpu_Debugger_insn[1];
+extern const uint32_t cpu_Debugger_insn[];
+extern const uint32_t cpu_Debugger_ret[];
extern db_regs_t ddb_regs;
#define DDB_REGS (&ddb_regs)
@@ -57,7 +58,7 @@ extern db_regs_t ddb_regs;
/* Similar to PC_ADVANCE(), except only advance on cpu_Debugger()'s bpt */
#define PC_BREAK_ADVANCE(tf) do { \
if ((tf)->tf_pc == (register_t)cpu_Debugger_insn) \
- (tf)->tf_pc += BKPT_SIZE; \
+ (tf)->tf_pc = (register_t)cpu_Debugger_ret; \
} while(0)
#define BKPT_ADDR(addr) (addr) /* breakpoint address */
Index: src/sys/arch/riscv/include/intr.h
diff -u src/sys/arch/riscv/include/intr.h:1.3 src/sys/arch/riscv/include/intr.h:1.4
--- src/sys/arch/riscv/include/intr.h:1.3 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/include/intr.h Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: intr.h,v 1.3 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: intr.h,v 1.4 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
@@ -137,7 +137,7 @@ int splraise(int);
void splx(int);
void splx_noprof(int);
void spl0(void);
-int splintr(uint32_t *);
+int splintr(unsigned long *);
void softint_deliver(void);
@@ -148,7 +148,9 @@ struct cpu_info;
#define DISABLE_INTERRUPTS() csr_sstatus_clear(SR_SIE)
void ipi_init(struct cpu_info *);
-void ipi_process(struct cpu_info *, uint64_t);
+void ipi_process(struct cpu_info *, unsigned long);
+
+int riscv_ipi_intr(void *arg);
/*
* These make no sense *NOT* to be inlined.
Index: src/sys/arch/riscv/include/machdep.h
diff -u src/sys/arch/riscv/include/machdep.h:1.4 src/sys/arch/riscv/include/machdep.h:1.5
--- src/sys/arch/riscv/include/machdep.h:1.4 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/include/machdep.h Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.h,v 1.4 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: machdep.h,v 1.5 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2022 The NetBSD Foundation, Inc.
@@ -33,7 +33,7 @@
#define _RISCV_MACHDEP_H_
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.h,v 1.4 2023/05/07 12:41:48 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.h,v 1.5 2023/06/12 19:04:14 skrll Exp $");
#include <sys/proc.h>
#include <sys/lwp.h>
@@ -68,14 +68,15 @@ paddr_t init_mmu(paddr_t);
void init_riscv(register_t, paddr_t);
void riscv_timer_frequency_set(uint32_t); // which header?
+uint32_t
+ riscv_timer_frequency_get(void); // which header?
void riscv_timer_register(void (*timerfn)(void));
void riscv_intr_set_handler(void (*intr_handler)(struct trapframe *,
register_t, register_t, register_t));
+void riscv_timer_init(void);
int riscv_timer_intr(void *arg);
-void cpus_fdt_md_attach(device_t, device_t, void *);
-
void pt_dump(void (*)(const char *, ...));
Index: src/sys/arch/riscv/include/pmap.h
diff -u src/sys/arch/riscv/include/pmap.h:1.17 src/sys/arch/riscv/include/pmap.h:1.18
--- src/sys/arch/riscv/include/pmap.h:1.17 Mon May 8 08:07:36 2023
+++ src/sys/arch/riscv/include/pmap.h Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.17 2023/05/08 08:07:36 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.18 2023/06/12 19:04:14 skrll Exp $ */
/*
* Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
@@ -80,7 +80,7 @@
#define KERNEL_PID 0
#define PMAP_HWPAGEWALKER 1
-#define PMAP_TLB_MAX 1
+#define PMAP_TLB_MAX MAXCPUS
#ifdef _LP64
#define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
@@ -90,6 +90,8 @@
#endif
#define PMAP_TLB_NUM_PIDS (__SHIFTOUT_MASK(SATP_ASID) + 1)
#define PMAP_TLB_BITMAP_LENGTH PMAP_TLB_NUM_PIDS
+// Should use SBI TLB ops
+#define PMAP_TLB_NEED_SHOOTDOWN 1
#define PMAP_TLB_FLUSH_ASID_ON_RESET false
#define pmap_phys_address(x) (x)
@@ -113,6 +115,7 @@ pmap_procwr(struct proc *p, vaddr_t va,
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_devmap.h>
#include <uvm/pmap/pmap_tlb.h>
+#include <uvm/pmap/pmap_synci.h>
#define PMAP_GROWKERNEL
#define PMAP_STEAL_MEMORY
@@ -124,21 +127,32 @@ struct pmap_md {
paddr_t md_ppn;
};
+static inline void
+pmap_md_icache_sync_all(void)
+{
+}
+
+static inline void
+pmap_md_icache_sync_range_index(vaddr_t va, vsize_t size)
+{
+}
+
struct vm_page *
- pmap_md_alloc_poolpage(int flags);
+ pmap_md_alloc_poolpage(int);
vaddr_t pmap_md_map_poolpage(paddr_t, vsize_t);
void pmap_md_unmap_poolpage(vaddr_t, vsize_t);
+
bool pmap_md_direct_mapped_vaddr_p(vaddr_t);
-bool pmap_md_io_vaddr_p(vaddr_t);
paddr_t pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t pmap_md_direct_map_paddr(paddr_t);
void pmap_md_init(void);
-
-void pmap_md_xtab_activate(struct pmap *, struct lwp *);
-void pmap_md_xtab_deactivate(struct pmap *);
+bool pmap_md_io_vaddr_p(vaddr_t);
+bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
void pmap_md_pdetab_init(struct pmap *);
void pmap_md_pdetab_fini(struct pmap *);
-bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
+void pmap_md_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
+void pmap_md_xtab_activate(struct pmap *, struct lwp *);
+void pmap_md_xtab_deactivate(struct pmap *);
void pmap_bootstrap(vaddr_t, vaddr_t);
Index: src/sys/arch/riscv/riscv/clock_machdep.c
diff -u src/sys/arch/riscv/riscv/clock_machdep.c:1.4 src/sys/arch/riscv/riscv/clock_machdep.c:1.5
--- src/sys/arch/riscv/riscv/clock_machdep.c:1.4 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/riscv/clock_machdep.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: clock_machdep.c,v 1.4 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: clock_machdep.c,v 1.5 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
#include <sys/cdefs.h>
-__RCSID("$NetBSD: clock_machdep.c,v 1.4 2023/05/07 12:41:48 skrll Exp $");
+__RCSID("$NetBSD: clock_machdep.c,v 1.5 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -42,8 +42,6 @@ __RCSID("$NetBSD: clock_machdep.c,v 1.4
#include <machine/sbi.h>
#include <machine/sysreg.h>
-static void riscv_timer_init(void);
-
static void (*_riscv_timer_init)(void) = riscv_timer_init;
static uint32_t timer_frequency;
@@ -70,6 +68,11 @@ riscv_timer_frequency_set(uint32_t freq)
timer_ticks_per_hz = freq / hz;
}
+uint32_t
+riscv_timer_frequency_get(void)
+{
+ return timer_frequency;
+}
void
riscv_timer_register(void (*timerfn)(void))
@@ -82,22 +85,22 @@ riscv_timer_register(void (*timerfn)(voi
_riscv_timer_init = timerfn;
}
-static void
+void
riscv_timer_init(void)
{
struct cpu_info * const ci = curcpu();
- uint64_t next;
-
ci->ci_lastintr = csr_time_read();
- next = ci->ci_lastintr + timer_ticks_per_hz;
+ uint64_t next = ci->ci_lastintr + timer_ticks_per_hz;
ci->ci_lastintr_scheduled = next;
sbi_set_timer(next); /* schedule next timer interrupt */
csr_sie_set(SIE_STIE); /* enable supervisor timer intr */
- tc.tc_frequency = timer_frequency;
- tc_init(&tc);
+ if (cpu_index(ci) == 0) {
+ tc.tc_frequency = timer_frequency;
+ tc_init(&tc);
+ }
}
Index: src/sys/arch/riscv/riscv/cpu.c
diff -u src/sys/arch/riscv/riscv/cpu.c:1.1 src/sys/arch/riscv/riscv/cpu.c:1.2
--- src/sys/arch/riscv/riscv/cpu.c:1.1 Sun May 7 12:41:48 2023
+++ src/sys/arch/riscv/riscv/cpu.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.1 2023/05/07 12:41:48 skrll Exp $ */
+/* $NetBSD: cpu.c,v 1.2 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2023 The NetBSD Foundation, Inc.
@@ -29,17 +29,23 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_multiprocessor.h"
+
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.1 2023/05/07 12:41:48 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.2 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
+#include <sys/kmem.h>
+#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <riscv/cpu.h>
#include <riscv/cpuvar.h>
+#include <riscv/machdep.h>
+#include <riscv/sbi.h>
#ifdef MULTIPROCESSOR
#define NCPUINFO MAXCPUS
@@ -59,6 +65,35 @@ void (*cpu_sdcache_wb_range)(vaddr_t, pa
u_int riscv_dcache_align = CACHE_LINE_SIZE;
u_int riscv_dcache_align_mask = CACHE_LINE_SIZE - 1;
+#define CPU_VENDOR_SIFIVE 0x489
+
+#define CPU_ARCH_7SERIES 0x8000000000000007
+
+struct cpu_arch {
+ uint64_t ca_id;
+ const char *ca_name;
+};
+
+struct cpu_arch cpu_arch_sifive[] = {
+ {
+ .ca_id = CPU_ARCH_7SERIES,
+ .ca_name = "7-Series Processor (E7, S7, U7 series)",
+ },
+ { }, // terminator
+};
+
+struct cpu_vendor {
+ uint32_t cv_id;
+ const char *cv_name;
+ struct cpu_arch *cv_arch;
+} cpu_vendors[] = {
+ {
+ .cv_id = CPU_VENDOR_SIFIVE,
+ .cv_name = "SiFive",
+ .cv_arch = cpu_arch_sifive,
+ },
+};
+
/*
* Our exported cpu_info structs; these will be first used by the
* secondary cpus as part of cpu_mpstart and the hatching process.
@@ -66,7 +101,12 @@ u_int riscv_dcache_align_mask = CACHE_
struct cpu_info cpu_info_store[NCPUINFO] = {
[0] = {
.ci_cpl = IPL_HIGH,
- .ci_curlwp = &lwp0
+ .ci_curlwp = &lwp0,
+ .ci_cpl = IPL_HIGH,
+#ifdef MULTIPROCESSOR
+ .ci_tlb_info = &pmap_tlb0_info,
+ .ci_flags = CPUF_PRIMARY | CPUF_PRESENT | CPUF_RUNNING,
+#endif
}
};
@@ -91,11 +131,57 @@ cpu_setup_sysctl(device_t dv, struct cpu
}
+static void
+cpu_identify(device_t self, struct cpu_info *ci)
+{
+ const register_t mvendorid = sbi_get_mvendorid().value;
+ const register_t marchid = sbi_get_marchid().value;
+ const uint32_t mimpid = sbi_get_mimpid().value;
+ struct cpu_arch *cv_arch = NULL;
+ const char *cv_name = NULL;
+ const char *ca_name = NULL;
+ char vendor[128];
+ char arch[128];
+
+ for (size_t i = 0; i < __arraycount(cpu_vendors); i++) {
+ if (mvendorid == cpu_vendors[i].cv_id) {
+ cv_name = cpu_vendors[i].cv_name;
+ cv_arch = cpu_vendors[i].cv_arch;
+ break;
+ }
+ }
+
+ if (cv_arch != NULL) {
+ for (size_t i = 0; cv_arch[i].ca_name != NULL; i++) {
+ if (marchid == cv_arch[i].ca_id) {
+ ca_name = cv_arch[i].ca_name;
+ break;
+ }
+ }
+ }
+
+ if (cv_name == NULL) {
+ snprintf(vendor, sizeof(vendor), "vendor %" PRIxREGISTER, mvendorid);
+ cv_name = vendor;
+ }
+ if (ca_name == NULL) {
+ snprintf(arch, sizeof(arch), "arch %" PRIxREGISTER, marchid);
+ ca_name = arch;
+ }
+
+ aprint_naive("\n");
+ aprint_normal(": %s %s imp. %" PRIx32 "\n", cv_name, ca_name, mimpid);
+ aprint_verbose_dev(ci->ci_dev,
+ "vendor 0x%" PRIxREGISTER " arch. %" PRIxREGISTER " imp. %" PRIx32 "\n",
+ mvendorid, marchid, mimpid);
+}
+
+
void
cpu_attach(device_t dv, cpuid_t id)
{
- struct cpu_info *ci = NULL;
const int unit = device_unit(dv);
+ struct cpu_info *ci;
if (unit == 0) {
ci = curcpu();
@@ -115,7 +201,6 @@ cpu_attach(device_t dv, cpuid_t id)
ci->ci_cpuid = id;
/* ci_cpuid is stored by own cpus when hatching */
- cpu_info[ncpu] = ci;
if (cpu_hatched_p(unit) == 0) {
ci->ci_dev = dv;
device_set_private(dv, ci);
@@ -134,16 +219,26 @@ cpu_attach(device_t dv, cpuid_t id)
ci->ci_dev = dv;
device_set_private(dv, ci);
- aprint_naive("\n");
- aprint_normal("\n");
- cpu_setup_sysctl(dv, ci);
+
+ cpu_identify(dv, ci);
#ifdef MULTIPROCESSOR
+ kcpuset_create(&ci->ci_shootdowncpus, true);
+
+ ipi_init(ci);
+
+ kcpuset_create(&ci->ci_multicastcpus, true);
+ kcpuset_create(&ci->ci_watchcpus, true);
+ kcpuset_create(&ci->ci_ddbcpus, true);
+
if (unit != 0) {
mi_cpu_attach(ci);
- pmap_tlb_info_attach(&pmap_tlb0_info, ci);
+ struct pmap_tlb_info *ti = kmem_zalloc(sizeof(*ti), KM_SLEEP);
+ pmap_tlb_info_init(ti);
+ pmap_tlb_info_attach(ti, ci);
}
#endif /* MULTIPROCESSOR */
+ cpu_setup_sysctl(dv, ci);
if (unit != 0) {
return;
@@ -160,10 +255,11 @@ cpu_attach(device_t dv, cpuid_t id)
void __noasan
cpu_init_secondary_processor(int cpuindex)
{
-#if 0
- struct cpu_info * ci = &cpu_info_store[cpuindex];
-#endif
+ cpu_set_hatched(cpuindex);
+ /*
+ * return to assembly to wait for cpu_boot_secondary_processors
+ */
}
@@ -175,5 +271,18 @@ void
cpu_hatch(struct cpu_info *ci)
{
KASSERT(curcpu() == ci);
+
+ ci->ci_cpu_freq = riscv_timer_frequency_get();
+
+ riscv_timer_init();
+
+ /*
+ * clear my bit of the mailbox to tell cpu_boot_secondary_processors().
+ * Consider that if there are cpu0, 1, 2, 3, and cpu2 is unresponsive,
+ * ci_index for each would be cpu0=0, cpu1=1, cpu2=undef, cpu3=2.
+ * therefore we have to use device_unit instead of ci_index for mbox.
+ */
+
+ cpu_clr_mbox(device_unit(ci->ci_dev));
}
#endif /* MULTIPROCESSOR */
Index: src/sys/arch/riscv/riscv/interrupt.c
diff -u src/sys/arch/riscv/riscv/interrupt.c:1.1 src/sys/arch/riscv/riscv/interrupt.c:1.2
--- src/sys/arch/riscv/riscv/interrupt.c:1.1 Sun May 7 12:41:49 2023
+++ src/sys/arch/riscv/riscv/interrupt.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: interrupt.c,v 1.1 2023/05/07 12:41:49 skrll Exp $ */
+/* $NetBSD: interrupt.c,v 1.2 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2022 The NetBSD Foundation, Inc.
@@ -29,9 +29,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_multiprocessor.h"
+
#include <sys/cdefs.h>
-__RCSID("$NetBSD: interrupt.c,v 1.1 2023/05/07 12:41:49 skrll Exp $");
+__RCSID("$NetBSD: interrupt.c,v 1.2 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -42,6 +44,7 @@ __RCSID("$NetBSD: interrupt.c,v 1.1 2023
#include <machine/locore.h>
#include <machine/machdep.h>
+#include <machine/sbi.h>
#include <riscv/dev/plicvar.h>
@@ -133,3 +136,59 @@ intr_disestablish(void *ih)
KASSERT(!cpu_intr_p());
KASSERT(!cpu_softintr_p());
}
+
+
+#ifdef MULTIPROCESSOR
+__CTASSERT(NIPIS < 16);
+
+int
+riscv_ipi_intr(void *arg)
+{
+ struct cpu_info * const ci = curcpu();
+ membar_acquire();
+
+ csr_sip_clear(SIP_SSIP); /* clean pending interrupt status */
+
+ unsigned long pending;
+ while ((pending = atomic_swap_ulong(&ci->ci_request_ipis, 0)) != 0) {
+ membar_acquire();
+ atomic_or_ulong(&ci->ci_active_ipis, pending);
+
+ ipi_process(ci, pending);
+
+ atomic_and_ulong(&ci->ci_active_ipis, pending);
+ }
+
+ return 1;
+}
+
+int
+cpu_send_ipi(struct cpu_info *ci, int req)
+{
+ KASSERT(req < NIPIS);
+ if (ci == NULL) {
+ CPU_INFO_ITERATOR cii;
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ if (ci != curcpu()) {
+ cpu_send_ipi(ci, req);
+ }
+ }
+ return 0;
+ }
+ const uint32_t ipi_mask = __BIT(req);
+
+ membar_release();
+ atomic_or_ulong(&ci->ci_request_ipis, ipi_mask);
+
+ membar_release();
+ unsigned long hartmask = 0;
+ const cpuid_t hartid = ci->ci_cpuid;
+ KASSERT(hartid < sizeof(unsigned long) * NBBY);
+ hartmask |= __BIT(hartid);
+ struct sbiret sbiret = sbi_send_ipi(hartmask, 0);
+
+ KASSERT(sbiret.error == SBI_SUCCESS);
+
+ return 0;
+}
+#endif /* MULTIPROCESSOR */
Index: src/sys/arch/riscv/riscv/cpu_subr.c
diff -u src/sys/arch/riscv/riscv/cpu_subr.c:1.2 src/sys/arch/riscv/riscv/cpu_subr.c:1.3
--- src/sys/arch/riscv/riscv/cpu_subr.c:1.2 Wed Nov 4 07:09:46 2020
+++ src/sys/arch/riscv/riscv/cpu_subr.c Mon Jun 12 19:04:14 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: cpu_subr.c,v 1.2 2020/11/04 07:09:46 skrll Exp $ */
+/* $NetBSD: cpu_subr.c,v 1.3 2023/06/12 19:04:14 skrll Exp $ */
/*-
- * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas of 3am Software Foundry.
+ * by Nick Hudson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,8 +29,402 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
+#include "opt_ddb.h"
+#include "opt_multiprocessor.h"
+#include "opt_riscv_debug.h"
-__RCSID("$NetBSD: cpu_subr.c,v 1.2 2020/11/04 07:09:46 skrll Exp $");
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.3 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
+#include <sys/atomic.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/reboot.h>
+#include <sys/xcall.h>
+
+#include <machine/db_machdep.h>
+#include <machine/sbi.h>
+
+#ifdef DDB
+#include <ddb/db_output.h>
+#endif
+
+#ifdef VERBOSE_INIT_RISCV
+#define VPRINTF(...) printf(__VA_ARGS__)
+#else
+#define VPRINTF(...) __nothing
+#endif
+
+#ifdef MULTIPROCESSOR
+#define NCPUINFO MAXCPUS
+#else
+#define NCPUINFO 1
+#endif /* MULTIPROCESSOR */
+
+unsigned long cpu_hartid[NCPUINFO] = {
+ [0 ... NCPUINFO - 1] = ~0,
+};
+
+#ifdef MULTIPROCESSOR
+
+kcpuset_t *cpus_halted;
+kcpuset_t *cpus_hatched;
+kcpuset_t *cpus_paused;
+kcpuset_t *cpus_resumed;
+kcpuset_t *cpus_running;
+
+#define CPUINDEX_DIVISOR (sizeof(u_long) * NBBY)
+
+#define N howmany(MAXCPUS, CPUINDEX_DIVISOR)
+
+/* cpu_hatch_ipi needs fixing for > 1 */
+CTASSERT(N == 1);
+volatile u_long riscv_cpu_hatched[N] __cacheline_aligned = { };
+volatile u_long riscv_cpu_mbox[N] __cacheline_aligned = { };
+u_int riscv_cpu_max = 1;
+
+/* IPI all APs to GO! */
+static void
+cpu_ipi_aps(void)
+{
+ unsigned long hartmask = 0;
+ // BP is index 0
+ for (size_t i = 1; i < ncpu; i++) {
+ const cpuid_t hartid = cpu_hartid[i];
+ KASSERT(hartid < sizeof(unsigned long) * NBBY);
+ hartmask |= __BIT(hartid);
+ }
+ struct sbiret sbiret = sbi_send_ipi(hartmask, 0);
+
+ KASSERT(sbiret.error == SBI_SUCCESS);
+}
+
+void
+cpu_boot_secondary_processors(void)
+{
+ u_int cpuno;
+
+ if ((boothowto & RB_MD1) != 0)
+ return;
+
+ VPRINTF("%s: starting secondary processors\n", __func__);
+
+ /*
+ * send mbox to have secondary processors do cpu_hatch()
+ * store-release matches locore.S
+ */
+ asm volatile("fence rw,w");
+ for (size_t n = 0; n < __arraycount(riscv_cpu_mbox); n++)
+ atomic_or_ulong(&riscv_cpu_mbox[n], riscv_cpu_hatched[n]);
+ cpu_ipi_aps();
+
+ /* wait for all cpus to have done cpu_hatch() */
+ for (cpuno = 1; cpuno < ncpu; cpuno++) {
+ if (!cpu_hatched_p(cpuno))
+ continue;
+
+ const size_t off = cpuno / CPUINDEX_DIVISOR;
+ const u_long bit = __BIT(cpuno % CPUINDEX_DIVISOR);
+
+ /* load-acquire matches cpu_clr_mbox */
+ while (atomic_load_acquire(&riscv_cpu_mbox[off]) & bit) {
+ /* spin - it shouldn't be long */
+ ;
+ }
+ }
+
+ VPRINTF("%s: secondary processors hatched\n", __func__);
+}
+
+bool
+cpu_hatched_p(u_int cpuindex)
+{
+ const u_int off = cpuindex / CPUINDEX_DIVISOR;
+ const u_int bit = cpuindex % CPUINDEX_DIVISOR;
+
+ /* load-acquire matches cpu_set_hatched */
+ return (atomic_load_acquire(&riscv_cpu_hatched[off]) & __BIT(bit)) != 0;
+}
+
+
+void
+cpu_set_hatched(int cpuindex)
+{
+
+ const size_t off = cpuindex / CPUINDEX_DIVISOR;
+ const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);
+
+ /* store-release matches cpu_hatched_p */
+ asm volatile("fence rw, w" ::: "memory");
+ atomic_or_ulong(&riscv_cpu_hatched[off], bit);
+
+ asm volatile("fence w, rw" ::: "memory");
+}
+
+void
+cpu_clr_mbox(int cpuindex)
+{
+
+ const size_t off = cpuindex / CPUINDEX_DIVISOR;
+ const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);
+
+ /* store-release matches locore.S */
+ asm volatile("fence rw,w" ::: "memory");
+ atomic_and_ulong(&riscv_cpu_mbox[off], ~bit);
+
+ asm volatile("fence w, rw" ::: "memory");
+}
+
+
+void
+cpu_broadcast_ipi(int tag)
+{
+
+ /*
+ * No reason to remove ourselves since multicast_ipi will do that
+ * for us.
+ */
+ cpu_multicast_ipi(cpus_running, tag);
+}
+
+void
+cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
+{
+ struct cpu_info * const ci = curcpu();
+ kcpuset_t *kcp2 = ci->ci_multicastcpus;
+
+ if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
+ return;
+
+ kcpuset_copy(kcp2, kcp);
+ kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
+ for (unsigned int cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
+ kcpuset_clear(kcp2, --cii);
+ (void)cpu_send_ipi(cpu_lookup(cii), tag);
+ }
+}
+
+static void
+cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
+{
+ bool done = false;
+ struct cpu_info * const ci = curcpu();
+ kcpuset_t *kcp = ci->ci_watchcpus;
+
+ /* some finite amount of time */
+
+ for (u_long limit = curcpu()->ci_cpu_freq / 10; !done && limit--; ) {
+ kcpuset_copy(kcp, watchset);
+ kcpuset_intersect(kcp, wanted);
+ done = kcpuset_match(kcp, wanted);
+ }
+
+ if (!done) {
+ cpuid_t cii;
+ kcpuset_copy(kcp, wanted);
+ kcpuset_remove(kcp, watchset);
+ if ((cii = kcpuset_ffs(kcp)) != 0) {
+ printf("Failed to %s:", s);
+ do {
+ kcpuset_clear(kcp, --cii);
+ printf(" cpu%lu", cii);
+ } while ((cii = kcpuset_ffs(kcp)) != 0);
+ printf("\n");
+ }
+ }
+}
+
+/*
+ * Halt this cpu
+ */
+void
+cpu_halt(void)
+{
+ cpuid_t cii = cpu_index(curcpu());
+
+ printf("cpu%lu: shutting down\n", cii);
+ kcpuset_atomic_set(cpus_halted, cii);
+ spl0(); /* allow interrupts e.g. further ipi ? */
+ for (;;) ; /* spin */
+
+ /* NOTREACHED */
+}
+
+/*
+ * Halt all running cpus, excluding current cpu.
+ */
+void
+cpu_halt_others(void)
+{
+ kcpuset_t *kcp;
+
+ // If we are the only CPU running, there's nothing to do.
+ if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
+ return;
+
+ // Get all running CPUs
+ kcpuset_clone(&kcp, cpus_running);
+ // Remove ourself
+ kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
+ // Remove any halted CPUs
+ kcpuset_remove(kcp, cpus_halted);
+ // If there are CPUs left, send the IPIs
+ if (!kcpuset_iszero(kcp)) {
+ cpu_multicast_ipi(kcp, IPI_HALT);
+ cpu_ipi_wait("halt", cpus_halted, kcp);
+ }
+ kcpuset_destroy(kcp);
+
+ /*
+ * TBD
+ * Depending on available firmware methods, other cpus will
+ * either shut down themselves, or spin and wait for us to
+ * stop them.
+ */
+}
+
+/*
+ * Pause this cpu
+ */
+void
+cpu_pause(void)
+{
+ const int s = splhigh();
+ cpuid_t cii = cpu_index(curcpu());
+
+ if (__predict_false(cold)) {
+ splx(s);
+ return;
+ }
+
+ do {
+ kcpuset_atomic_set(cpus_paused, cii);
+ do {
+ ;
+ } while (kcpuset_isset(cpus_paused, cii));
+ kcpuset_atomic_set(cpus_resumed, cii);
+#if defined(DDB)
+ if (ddb_running_on_this_cpu_p())
+ cpu_Debugger();
+ if (ddb_running_on_any_cpu_p())
+ continue;
+#endif
+ } while (false);
+
+ splx(s);
+}
+
+/*
+ * Pause all running cpus, excluding current cpu.
+ */
+void
+cpu_pause_others(void)
+{
+ struct cpu_info * const ci = curcpu();
+
+ if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
+ return;
+
+ kcpuset_t *kcp = ci->ci_ddbcpus;
+
+ kcpuset_copy(kcp, cpus_running);
+ kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
+ kcpuset_remove(kcp, cpus_paused);
+
+ cpu_broadcast_ipi(IPI_SUSPEND);
+ cpu_ipi_wait("pause", cpus_paused, kcp);
+}
+
+/*
+ * Resume a single cpu
+ */
+void
+cpu_resume(cpuid_t cii)
+{
+
+ if (__predict_false(cold))
+ return;
+
+ struct cpu_info * const ci = curcpu();
+ kcpuset_t *kcp = ci->ci_ddbcpus;
+
+ kcpuset_set(kcp, cii);
+ kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
+ kcpuset_atomic_clear(cpus_paused, cii);
+
+ cpu_ipi_wait("resume", cpus_resumed, kcp);
+}
+
+/*
+ * Resume all paused cpus.
+ */
+void
+cpu_resume_others(void)
+{
+
+ if (__predict_false(cold))
+ return;
+
+ struct cpu_info * const ci = curcpu();
+ kcpuset_t *kcp = ci->ci_ddbcpus;
+
+ kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
+ kcpuset_copy(kcp, cpus_paused);
+ kcpuset_atomicly_remove(cpus_paused, cpus_paused);
+
+ /* CPUs awake on cpus_paused clear */
+ cpu_ipi_wait("resume", cpus_resumed, kcp);
+}
+
+bool
+cpu_is_paused(cpuid_t cii)
+{
+
+ return !cold && kcpuset_isset(cpus_paused, cii);
+}
+
+#ifdef DDB
+void
+cpu_debug_dump(void)
+{
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ char running, hatched, paused, resumed, halted;
+ db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS(A/R)\n");
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
+ running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
+ paused = (kcpuset_isset(cpus_paused, cpu_index(ci)) ? 'P' : '-');
+ resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
+ halted = (kcpuset_isset(cpus_halted, cpu_index(ci)) ? 'h' : '-');
+ db_printf("%3d 0x%03lx %c%c%c%c%c %p "
+ "%3d %3d %3d "
+ "0x%02lx/0x%02lx\n",
+ cpu_index(ci), ci->ci_cpuid,
+ running, hatched, paused, resumed, halted,
+ ci, ci->ci_cpl, ci->ci_intr_depth, ci->ci_mtx_count,
+ ci->ci_active_ipis, ci->ci_request_ipis);
+ }
+}
+#endif
+
+void
+xc_send_ipi(struct cpu_info *ci)
+{
+ KASSERT(kpreempt_disabled());
+ KASSERT(curcpu() != ci);
+
+ cpu_send_ipi(ci, IPI_XCALL);
+}
+
+void
+cpu_ipi(struct cpu_info *ci)
+{
+ KASSERT(kpreempt_disabled());
+ KASSERT(curcpu() != ci);
+
+ cpu_send_ipi(ci, IPI_GENERIC);
+}
+
+#endif
Index: src/sys/arch/riscv/riscv/db_interface.c
diff -u src/sys/arch/riscv/riscv/db_interface.c:1.2 src/sys/arch/riscv/riscv/db_interface.c:1.3
--- src/sys/arch/riscv/riscv/db_interface.c:1.2 Wed Oct 26 23:38:08 2022
+++ src/sys/arch/riscv/riscv/db_interface.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: db_interface.c,v 1.2 2022/10/26 23:38:08 riastradh Exp $ */
+/* $NetBSD: db_interface.c,v 1.3 2023/06/12 19:04:14 skrll Exp $ */
/*
* Mach Operating System
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.2 2022/10/26 23:38:08 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.3 2023/06/12 19:04:14 skrll Exp $");
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
@@ -118,20 +118,23 @@ kdb_trap(int type, db_regs_t *regs)
} else {
if (old_ddb_cpu != cpu_me) {
KASSERT(cpu_is_paused(cpu_me));
- cpu_pause(regs);
+ cpu_pause();
splx(s);
return 1;
}
}
- KASSERT(! cpu_is_paused(cpu_me));
+ KASSERT(!cpu_is_paused(cpu_me));
#endif
+ struct cpu_info * const ci = curcpu();
ddb_regs = *regs;
+ ci->ci_ddb_regs = &ddb_regs;
db_active++;
cnpollc(1);
db_trap(type, 0 /*code*/);
cnpollc(0);
db_active--;
+ ci->ci_ddb_regs = NULL;
*regs = ddb_regs;
#if defined(MULTIPROCESSOR)
@@ -140,7 +143,7 @@ kdb_trap(int type, db_regs_t *regs)
} else {
cpu_resume(ddb_cpu);
if (first_in_ddb)
- cpu_pause(regs);
+ cpu_pause();
}
#endif
Index: src/sys/arch/riscv/riscv/db_machdep.c
diff -u src/sys/arch/riscv/riscv/db_machdep.c:1.10 src/sys/arch/riscv/riscv/db_machdep.c:1.11
--- src/sys/arch/riscv/riscv/db_machdep.c:1.10 Wed Oct 12 07:53:56 2022
+++ src/sys/arch/riscv/riscv/db_machdep.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: db_machdep.c,v 1.10 2022/10/12 07:53:56 simonb Exp $ */
+/* $NetBSD: db_machdep.c,v 1.11 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
#include <sys/cdefs.h>
-__RCSID("$NetBSD: db_machdep.c,v 1.10 2022/10/12 07:53:56 simonb Exp $");
+__RCSID("$NetBSD: db_machdep.c,v 1.11 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
@@ -89,7 +89,6 @@ int
db_rw_ddbreg(const struct db_variable *vp, db_expr_t *valp, int rw)
{
struct trapframe * const tf = curcpu()->ci_ddb_regs;
- KASSERT(db_regs <= vp && vp < db_regs + __arraycount(db_regs));
const uintptr_t addr = (uintptr_t)tf + (uintptr_t)vp->valuep;
if (vp->modif != NULL && vp->modif[0] == 'i') {
if (rw == DB_VAR_GET) {
Index: src/sys/arch/riscv/riscv/genassym.cf
diff -u src/sys/arch/riscv/riscv/genassym.cf:1.14 src/sys/arch/riscv/riscv/genassym.cf:1.15
--- src/sys/arch/riscv/riscv/genassym.cf:1.14 Sun May 7 12:41:49 2023
+++ src/sys/arch/riscv/riscv/genassym.cf Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.14 2023/05/07 12:41:49 skrll Exp $
+# $NetBSD: genassym.cf,v 1.15 2023/06/12 19:04:14 skrll Exp $
#-
# Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -65,6 +65,10 @@ define SIE_SEIE SIE_SEIE
define SIE_STIE SIE_STIE
define SIE_SSIE SIE_SSIE
+define SIP_SEIP SIP_SEIP
+define SIP_STIP SIP_STIP
+define SIP_SSIP SIP_SSIP
+
define CAUSE_SYSCALL CAUSE_SYSCALL
ifdef _LP64
@@ -85,7 +89,7 @@ define IPL_SOFTBIO IPL_SOFTBIO
define IPL_SOFTCLOCK IPL_SOFTCLOCK
define IPL_NONE IPL_NONE
-#define CPU_MAXNUM CPU_MAXNUM
+define MAXCPUS MAXCPUS
define TF_LEN sizeof(struct trapframe)
define TF_RA offsetof(struct trapframe, tf_reg[_X_RA])
Index: src/sys/arch/riscv/riscv/locore.S
diff -u src/sys/arch/riscv/riscv/locore.S:1.41 src/sys/arch/riscv/riscv/locore.S:1.42
--- src/sys/arch/riscv/riscv/locore.S:1.41 Sun May 7 12:41:49 2023
+++ src/sys/arch/riscv/riscv/locore.S Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.41 2023/05/07 12:41:49 skrll Exp $ */
+/* $NetBSD: locore.S,v 1.42 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014, 2022 The NetBSD Foundation, Inc.
@@ -30,13 +30,20 @@
*/
#include "opt_console.h"
+#include "opt_multiprocessor.h"
#include "opt_riscv_debug.h"
#include <machine/asm.h>
#include "assym.h"
- .globl _C_LABEL(exception_userexit)
- .globl _C_LABEL(cpu_Debugger_insn)
+#define BOOT_AP_STACKSIZE 1024 /* size of temporary stack for APs */
+#define NBBY_SHIFT 3 /* log2(8 bits per byte) */
+
+#define PRINTS(string) \
+ call locore_prints ; \
+ .asciz string ; \
+ .align 3 ; \
+
#if defined(VERBOSE_INIT_RISCV)
@@ -78,18 +85,6 @@ ENTRY_NP(start)
li s0, SR_FS
csrc sstatus, s0 // disable FP
- /*
- * atomically swap a non-zero value into hart_boot. If we see zero
- * we won in race to become BP.
- */
- li s1, 1
- la s0, hart_boot
-
- amoswap.w s0, s1, (s0)
- bnez s0, mpentry
- /*
- * The BP only executes from here on.
- */
mv s10, a0 // copy hartid
mv s11, a1 // copy dtb PA
@@ -375,12 +370,189 @@ vstart:
END(start)
-ENTRY(mpentry)
+#if defined(MULTIPROCESSOR)
+
+// a0 is hartid
+// a1 is the cookie from sbi_hart_start
+ENTRY(cpu_mpstart)
+ mv s10, a0 // copy hartid
+ mv s11, a1 // copy sbi_hart_start cookie
+
+ /*
+ * resolve our cpuindex. each hartid is stored in
+ * extern unsigned long cpu_hartid[MAXCPUS]
+ */
+ PTR_LA t0, _C_LABEL(cpu_hartid)
+ li t6, MAXCPUS
+ li t1, 0
1:
+ addi t1, t1, 1
+ bgeu t1, t6, toomanyharts /* cpuindex >= MAXCPUS ? */
+
+ slli t2, t1, LONG_SCALESHIFT
+ add t2, t0, t2
+ LONG_L t3, 0(t2) /* cpu_hartid[cpuindex] */
+ bne t3, s10, 1b
+
+ mv s9, t1 /* s9 = cpuindex */
+
+ /*
+ * s9 = cpuindex
+ */
+
+ /* set stack pointer for boot */
+ li t1, BOOT_AP_STACKSIZE // XXXNH do a shift
+ mul t1, s9, t1
+ PTR_LA t0, _C_LABEL(bootstk)
+ /* sp = bootstk + (BOOT_AP_STACKSIZE * cpuindex) */
+ add sp, t0, t1
+
+
+ /*
+ * Calculate the difference between the VA and PA for start and
+ * keep in s8.
+ */
+ PTR_LA t0, start
+ PTR_L s8, .Lstart
+
+ sub s8, s8, t0
+
+#ifdef _LP64
+ PTR_LA s4, _C_LABEL(l2_pte)
+#else
+ PTR_LA s4, _C_LABEL(l1_pte)
+#endif
+
+ // s4 is satp address....
+ // s8 is kern_vtopdiff
+ //
+
+ /* Set supervisor trap vector base register */
+ PTR_LA t0, vmpstart
+ add t0, t0, s8
+ csrw stvec, t0
+
+ /* Set supervisor address translation and protection register */
+ srli t1, s4, PGSHIFT
+#ifdef _LP64
+ li t0, SATP_MODE_SV39
+#else
+ li t0, SATP_MODE_SV32
+#endif
+ or t0, t0, t1
+ sfence.vma
+ csrw satp, t0
+
+ .align 2
+ .global vmpstart
+vmpstart:
+ // MMU is on!
+ csrw sscratch, zero // zero in sscratch to mark kernel
+
+ /* Set the global pointer */
+ .option push
+ .option norelax
+ lla gp, __global_pointer$
+ .option pop
+
+ /* Set SP to VA */
+ add sp, sp, s8
+
+ /* Set supervisor trap vector base register with ipi_handler */
+ PTR_LA a0, _C_LABEL(ipi_handler)
+ csrw stvec, a0
+ csrsi sie, SIE_SSIE
+ csrsi sstatus, SR_SIE // enable interrupts
+
+ li tp, 0
+ mv a0, s9
+ call _C_LABEL(cpu_init_secondary_processor)
+
+ /* t3 = __BIT(cpuindex % (sizeof(u_long) * NBBY)) */
+ li t3, 1
+ andi t0, s9, (1U << (LONG_SCALESHIFT + NBBY_SHIFT)) - 1
+ sll t3, t3, t0
+
+ /* t4 = &riscv_cpu_mbox[cpuindex / (sizeof(u_long) * NBBY)] */
+ PTR_LA t0, _C_LABEL(riscv_cpu_mbox)
+ srli t1, s9, LONG_SCALESHIFT + NBBY_SHIFT
+ slli t1, t1, LONG_SCALESHIFT
+ add t4, t0, t1
+
+ /* wait for the mailbox start bit to become true */
+1:
+ fence rw, r /* matches cpu_boot_secondary_processors */
+ LONG_L t0, 0(t4)
+ and t0, t0, t3
+ bne t0, zero, 9f
wfi
j 1b
-END(mpentry)
+9:
+
+ /* Set supervisor trap vector base register */
+ PTR_LA a0, _C_LABEL(cpu_exception_handler)
+ csrw stvec, a0
+
+ li t0, CI_SIZE
+ mul t0, s9, t0
+ PTR_LA t1, _C_LABEL(cpu_info_store)
+ add a0, t0, t1 /* a0 = &cpu_info_store[cpuindex] */
+
+ /*
+ * set curlwp (tp and curcpu()->ci_curlwp) now we know the
+ * idle lwp from curcpu()->ci_idlelwp
+ */
+ PTR_L tp, CI_IDLELWP(a0) /* tp = curcpu()->ci_idlelwp */
+ PTR_S tp, CI_CURLWP(a0) /* curlwp is idlelwp */
+
+ /* get my stack from lwp */
+ PTR_L t2, L_PCB(tp) /* t2 = lwp_getpcb(idlelwp) */
+ li t3, UPAGES * PAGE_SIZE
+ add t2, t2, t3
+ addi sp, t2, -TF_LEN /* sp = pcb + USPACE - TF_LEN */
+
+ li s0, 0 /* trace back starts here (fp = 0) */
+ PTR_L a0, L_CPU(tp) /* curlwp->l_cpu */
+ call _C_LABEL(cpu_hatch)
+
+ li s0, 0 // zero frame pointer
+ tail idle_loop
+ /* No return from idle_loop */
+END(cpu_mpstart)
+
+
+toomanyharts:
+	PRINTS("too many harts, or hart id doesn't exist in cpu_hartid[]\n")
+1: wfi
+ j 1b
+
+/*
+ * A very basic exception handler to just return when an IPI comes in during
+ * AP bringup.
+ *
+ * The handler address needs to have bottom two bits as zero.
+ */
+ .align 2
+
+ipi_handler:
+ csrrw tp, sscratch, tp // swap scratch and thread pointer
+ bnez tp, 1f // tp != 0, something went wrong.
+
+ csrr tp, scause // get cause
+ bgez tp, 2f // MSB is set if interrupt
+
+ csrw sip, zero // clear all interrupts
+ csrrw tp, sscratch, zero // get back our thread pointer
+ sret
+
+1:
+ wfi
+ j 1b
+2:
+ wfi
+ j 2b
+#endif
.align 3
.Lstart:
@@ -436,6 +608,16 @@ ENTRY_NP(clear_bss)
END(clear_bss)
+ .globl _C_LABEL(cpu_Debugger_insn)
+ .globl _C_LABEL(cpu_Debugger_ret)
+
+ENTRY_NP(cpu_Debugger)
+cpu_Debugger_insn:
+ ebreak
+cpu_Debugger_ret:
+ ret
+END(cpu_Debugger)
+
#if defined(VERBOSE_INIT_RISCV)
ENTRY_NP(locore_prints)
addi sp, sp, -(SZREG * 2)
@@ -533,6 +715,11 @@ END(locore_printxnl)
hart_boot:
.word 0
+ /*
+ * Allocate some memory after the kernel image for stacks and
+ * bootstrap L1PT
+ */
+
.section "_init_memory", "aw", %nobits
.align PGSHIFT
.global _C_LABEL(lwp0uspace)
@@ -540,21 +727,10 @@ _C_LABEL(lwp0uspace):
.space UPAGES * PAGE_SIZE
bootstk:
-
- /*
- * Allocate some memory after the kernel image for stacks and
- * bootstrap L1PT
- */
- .align PGSHIFT
-#if 0
-.global start_stacks_bottom
- .global start_stacks_top
-start_stacks_bottom:
- .space INIT_ARM_TOTAL_STACK
-start_stacks_top:
+#ifdef MULTIPROCESSOR
+ .space BOOT_AP_STACKSIZE * (MAXCPUS - 1)
#endif
-
.section "_init_memory", "aw", %nobits
.align PGSHIFT
mmutables_start:
@@ -570,9 +746,3 @@ l1_pte:
.space PAGE_SIZE
mmutables_end:
-
-ENTRY_NP(cpu_Debugger)
-cpu_Debugger_insn:
- sbreak
- ret
-END(cpu_Debugger)
Index: src/sys/arch/riscv/riscv/pmap_machdep.c
diff -u src/sys/arch/riscv/riscv/pmap_machdep.c:1.17 src/sys/arch/riscv/riscv/pmap_machdep.c:1.18
--- src/sys/arch/riscv/riscv/pmap_machdep.c:1.17 Sat Jun 10 07:02:26 2023
+++ src/sys/arch/riscv/riscv/pmap_machdep.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_machdep.c,v 1.17 2023/06/10 07:02:26 skrll Exp $ */
+/* $NetBSD: pmap_machdep.c,v 1.18 2023/06/12 19:04:14 skrll Exp $ */
/*
* Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
@@ -31,11 +31,12 @@
*/
#include "opt_riscv_debug.h"
+#include "opt_multiprocessor.h"
#define __PMAP_PRIVATE
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pmap_machdep.c,v 1.17 2023/06/10 07:02:26 skrll Exp $");
+__RCSID("$NetBSD: pmap_machdep.c,v 1.18 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@@ -161,13 +162,20 @@ pmap_md_ok_to_steal_p(const uvm_physseg_
return true;
}
+#ifdef MULTIPROCESSOR
+void
+pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
+{
+}
+#endif
+
void
pmap_md_xtab_activate(struct pmap *pmap, struct lwp *l)
{
// UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-// struct cpu_info * const ci = curcpu();
+ struct cpu_info * const ci = curcpu();
struct pmap_asid_info * const pai = PMAP_PAI(pmap, cpu_tlb_info(ci));
uint64_t satp =
@@ -297,6 +305,12 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
VPRINTF("common ");
pmap_bootstrap_common();
+#ifdef MULTIPROCESSOR
+ VPRINTF("cpusets ");
+ struct cpu_info * const ci = curcpu();
+ kcpuset_create(&ci->ci_shootdowncpus, true);
+#endif
+
VPRINTF("bs_pde %p ", bootstrap_pde);
// kend = (kend + 0x200000 - 1) & -0x200000;
@@ -416,88 +430,3 @@ pmap_kenter_range(vaddr_t va, paddr_t pa
return 0;
}
-
-
-/* -------------------------------------------------------------------------- */
-
-tlb_asid_t
-tlb_get_asid(void)
-{
- return csr_asid_read();
-}
-
-void
-tlb_set_asid(tlb_asid_t asid, struct pmap *pm)
-{
- csr_asid_write(asid);
-}
-
-#if 0
-void tlb_invalidate_all(void);
-void tlb_invalidate_globals(void);
-#endif
-
-void
-tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
-{
- for (; lo <= hi; lo++) {
- __asm __volatile("sfence.vma zero, %[asid]"
- : /* output operands */
- : [asid] "r" (lo)
- : "memory");
- }
-}
-void
-tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
-{
- if (asid == KERNEL_PID) {
- __asm __volatile("sfence.vma %[va]"
- : /* output operands */
- : [va] "r" (va)
- : "memory");
- } else {
- __asm __volatile("sfence.vma %[va], %[asid]"
- : /* output operands */
- : [va] "r" (va), [asid] "r" (asid)
- : "memory");
- }
-}
-
-bool
-tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
-{
- if (asid == KERNEL_PID) {
- __asm __volatile("sfence.vma %[va]"
- : /* output operands */
- : [va] "r" (va)
- : "memory");
- } else {
- __asm __volatile("sfence.vma %[va], %[asid]"
- : /* output operands */
- : [va] "r" (va), [asid] "r" (asid)
- : "memory");
- }
- return true;
-}
-
-u_int
-tlb_record_asids(u_long *ptr, tlb_asid_t asid_max)
-{
- memset(ptr, 0xff, PMAP_TLB_NUM_PIDS / NBBY);
- ptr[0] = -2UL;
-
- return PMAP_TLB_NUM_PIDS - 1;
-}
-
-void
-tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
-{
- /* no way to view the TLB */
-}
-
-#if 0
-void tlb_enter_addr(size_t, const struct tlbmask *);
-void tlb_read_entry(size_t, struct tlbmask *);
-void tlb_write_entry(size_t, const struct tlbmask *);
-void tlb_dump(void (*)(const char *, ...));
-#endif
Index: src/sys/arch/riscv/riscv/riscv_machdep.c
diff -u src/sys/arch/riscv/riscv/riscv_machdep.c:1.28 src/sys/arch/riscv/riscv/riscv_machdep.c:1.29
--- src/sys/arch/riscv/riscv/riscv_machdep.c:1.28 Sun May 28 12:56:56 2023
+++ src/sys/arch/riscv/riscv/riscv_machdep.c Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: riscv_machdep.c,v 1.28 2023/05/28 12:56:56 skrll Exp $ */
+/* $NetBSD: riscv_machdep.c,v 1.29 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014, 2019, 2022 The NetBSD Foundation, Inc.
@@ -31,10 +31,11 @@
#include "opt_ddb.h"
#include "opt_modular.h"
+#include "opt_multiprocessor.h"
#include "opt_riscv_debug.h"
#include <sys/cdefs.h>
-__RCSID("$NetBSD: riscv_machdep.c,v 1.28 2023/05/28 12:56:56 skrll Exp $");
+__RCSID("$NetBSD: riscv_machdep.c,v 1.29 2023/06/12 19:04:14 skrll Exp $");
#include <sys/param.h>
@@ -372,14 +373,13 @@ cpu_signotify(struct lwp *l)
if (l->l_cpu != curcpu()) {
#ifdef MULTIPROCESSOR
- cpu_send_ipi(ci, IPI_AST);
+ cpu_send_ipi(l->l_cpu, IPI_AST);
#endif
} else {
l->l_md.md_astpending = 1; /* force call to ast() */
}
}
-
void
cpu_need_proftick(struct lwp *l)
{
@@ -520,6 +520,26 @@ cpu_startup(void)
format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
printf("avail memory = %s\n", pbuf);
+#ifdef MULTIPROCESSOR
+ kcpuset_create(&cpus_halted, true);
+ KASSERT(cpus_halted != NULL);
+
+ kcpuset_create(&cpus_hatched, true);
+ KASSERT(cpus_hatched != NULL);
+
+ kcpuset_create(&cpus_paused, true);
+ KASSERT(cpus_paused != NULL);
+
+ kcpuset_create(&cpus_resumed, true);
+ KASSERT(cpus_resumed != NULL);
+
+ kcpuset_create(&cpus_running, true);
+ KASSERT(cpus_running != NULL);
+
+ kcpuset_set(cpus_hatched, cpu_number());
+ kcpuset_set(cpus_running, cpu_number());
+#endif
+
fdtbus_intr_init();
}
@@ -689,7 +709,7 @@ init_riscv(register_t hartid, paddr_t dt
fdtbus_init(fdt_data);
/* Lookup platform specific backend */
- const struct fdt_platform *plat = fdt_platform_find();
+ const struct fdt_platform * const plat = fdt_platform_find();
if (plat == NULL)
panic("Kernel does not support this device");
@@ -871,6 +891,16 @@ init_riscv(register_t hartid, paddr_t dt
/* Finish setting up lwp0 on our end before we call main() */
riscv_init_lwp0_uarea();
+
+
+ error = 0;
+ if ((boothowto & RB_MD1) == 0) {
+ VPRINTF("mpstart\n");
+ if (plat->fp_mpstart)
+ error = plat->fp_mpstart();
+ }
+ if (error)
+ printf("AP startup problems\n");
}
@@ -930,8 +960,6 @@ dump_ln_table(paddr_t pdp_pa, int topbit
}
}
-#endif
-
void
pt_dump(void (*pr)(const char *, ...) __printflike(1, 2))
{
@@ -958,6 +986,7 @@ pt_dump(void (*pr)(const char *, ...) __
dump_ln_table(satp_pa, topbit, level, 0, pr);
#endif
}
+#endif
void
consinit(void)
Index: src/sys/arch/riscv/riscv/spl.S
diff -u src/sys/arch/riscv/riscv/spl.S:1.7 src/sys/arch/riscv/riscv/spl.S:1.8
--- src/sys/arch/riscv/riscv/spl.S:1.7 Sat Jun 10 09:18:50 2023
+++ src/sys/arch/riscv/riscv/spl.S Mon Jun 12 19:04:14 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: spl.S,v 1.7 2023/06/10 09:18:50 skrll Exp $ */
+/* $NetBSD: spl.S,v 1.8 2023/06/12 19:04:14 skrll Exp $ */
/*-
* Copyright (c) 2014,2023 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
#include <machine/asm.h>
#include "assym.h"
-__RCSID("$NetBSD: spl.S,v 1.7 2023/06/10 09:18:50 skrll Exp $")
+__RCSID("$NetBSD: spl.S,v 1.8 2023/06/12 19:04:14 skrll Exp $")
#define SZINT (1 << INT_SCALESHIFT)
@@ -50,9 +50,9 @@ _C_LABEL(ipl_sie_map):
.word SIE_SEIE | SIE_STIE /* IPL_SCHED */
.word SIE_SEIE | SIE_STIE | SIE_SSIE /* IPL_HIGH */
+
ENTRY_NP(splx)
// a0 = new IPL
-// csrci sstatus, SR_SIE // disable interrupts
PTR_L a3, L_CPU(tp) // get curcpu()
INT_L t0, CI_CPL(a3) // get current IPL
bge a0, t0, 2f
@@ -68,13 +68,6 @@ ENTRY_NP(splx)
// a0 = new ipl
INT_S a0, CI_CPL(a3) // change IPL
csrs sie, a1
-#if 0
- beqz t0, 2f
-
- //call riscv_do_pending_irqs
-2:
- csrsi sstatus, SR_SIE // enable interrupts
-#endif
#ifdef __HAVE_FAST_SOFTINTS
INT_L t4, CI_SOFTINTS(a3) // get softint mask
@@ -87,21 +80,20 @@ ENTRY_NP(splx)
tail _C_LABEL(softint_deliver)
3:
#endif /* __HAVE_FAST_SOFTINTS */
+2:
ret // return (or do softints)
END(splx)
+
#if IPL_NONE != 0
#error IPL_NONE is not 0
#endif
ENTRY_NP(spl0)
-// csrci sstatus, SR_SIE // disable interrupts
PTR_L a3, L_CPU(tp) // get curcpu()
INT_S zero, CI_CPL(a3) // set current IPL to IPL_NONE
li t2, (SIE_SEIE | SIE_STIE | SIE_SSIE)
csrs sie, t2
- //call riscv_do_pending_irqs
-
csrsi sstatus, SR_SIE // enable interrupts
#ifdef __HAVE_FAST_SOFTINTS
// spl0() is only called rarely so the overhead of always calling
@@ -113,14 +105,31 @@ ENTRY_NP(spl0)
END(spl0)
+ENTRY(splintr)
+ csrr t0, sip
+ li t1, IPL_NONE
+ andi t0, t0, (SIP_SEIP | SIP_STIP | SIP_SSIP)
+ beq t0, zero, 1f // If nothing is pending return IPL_NONE
+ PTR_LA t3, _C_LABEL(ipl_sie_map)
-
-
-
-
-
+ li t1, IPL_VM
+ li t2, IPL_HIGH + 1
+2:
+ INT_L t5, IPL_VM * (1 << INT_SCALESHIFT)(t3)
+ PTR_ADDI t3, t3, 1 << INT_SCALESHIFT
+ not t5, t5
+ and t5, t5, t0
+ beq t5, zero, 1f
+ addi t1, t1, 1
+ bne t1, t2, 2b
+
+1:
+ LONG_S t0, 0(a0)
+ mv a0, t1
+ jr ra
+END(splintr)
ENTRY_NP(splsoftclock)
@@ -129,44 +138,41 @@ ENTRY_NP(splsoftclock)
j _splraise
END(splsoftclock)
+
ENTRY_NP(splsoftbio)
li t1, IPL_SOFTBIO
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_SOFTBIO
j _splraise
END(splsoftbio)
+
ENTRY_NP(splsoftnet)
li t1, IPL_SOFTNET
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_SOFTNET
j _splraise
END(splsoftnet)
+
ENTRY_NP(splsoftserial)
li t1, IPL_SOFTSERIAL
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_SOFTSERIAL
j _splraise
END(splsoftserial)
+
ENTRY_NP(splvm)
li t1, IPL_VM
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_VM
j _splraise
END(splvm)
+
ENTRY_NP(splsched)
li t1, IPL_SCHED
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_SCHED
j _splraise
END(splsched)
-#if 0
-ENTRY_NP(splddb)
- li t1, IPL_DDB
- INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_DDB
- j _splraise
-END(splddb)
-#endif
-
ENTRY_NP(splhigh)
li t1, IPL_HIGH
INT_L t0, _C_LABEL(ipl_sie_map) + SZINT * IPL_HIGH
@@ -174,7 +180,6 @@ ENTRY_NP(splhigh)
END(splhigh)
-
ENTRY_NP(splraise)
// a0 = new higher IPL
mv t1, a0
@@ -184,13 +189,11 @@ ENTRY_NP(splraise)
INT_L t0, 0(a1)
_splraise:
-// csrc sstatus, SR_SIE // disable interrupts
PTR_L a3, L_CPU(tp) // get curcpu()
INT_L a0, CI_CPL(a3) // get current IPL
bge a0, t1, 2f // already at same or higher?
csrc sie, t0 //
INT_S t1, CI_CPL(a3) // change IPL
2:
-// csrsi sstatus, SR_SIE // enable interrupts
ret
END(splraise)
Added files:
Index: src/sys/arch/riscv/fdt/riscv_fdtvar.h
diff -u /dev/null src/sys/arch/riscv/fdt/riscv_fdtvar.h:1.1
--- /dev/null Mon Jun 12 19:04:14 2023
+++ src/sys/arch/riscv/fdt/riscv_fdtvar.h Mon Jun 12 19:04:13 2023
@@ -0,0 +1,41 @@
+/* $NetBSD: riscv_fdtvar.h,v 1.1 2023/06/12 19:04:13 skrll Exp $ */
+
+/*-
+ * Copyright (c) 2023 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Nick Hudson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RISCV_RISCV_FDTVAR_H
+#define _RISCV_RISCV_FDTVAR_H
+
+void riscv_fdt_cpu_bootstrap(void);
+int riscv_fdt_cpu_mpstart(void);
+void riscv_fdt_cpu_hatch_register(void *, void (*)(void *, struct cpu_info *));
+void riscv_fdt_cpu_hatch(struct cpu_info *);
+
+#endif /* !_RISCV_RISCV_FDTVAR_H */
Index: src/sys/arch/riscv/riscv/ipifuncs.c
diff -u /dev/null src/sys/arch/riscv/riscv/ipifuncs.c:1.1
--- /dev/null Mon Jun 12 19:04:14 2023
+++ src/sys/arch/riscv/riscv/ipifuncs.c Mon Jun 12 19:04:14 2023
@@ -0,0 +1,168 @@
+/* $NetBSD: ipifuncs.c,v 1.1 2023/06/12 19:04:14 skrll Exp $ */
+
+/*-
+ * Copyright (c) 2010 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.1 2023/06/12 19:04:14 skrll Exp $");
+
+#include <sys/param.h>
+
+#include <sys/cpu.h>
+#include <sys/device.h>
+#include <sys/intr.h>
+#include <sys/ipi.h>
+#include <sys/xcall.h>
+
+#include <uvm/uvm_extern.h>
+#include <uvm/pmap/pmap_synci.h>
+#include <uvm/pmap/pmap_tlb.h>
+
+static void ipi_halt(void) __dead;
+
+/*
+ * Event-counter names for each IPI type, indexed by IPI number.
+ * ipi_init() asserts every slot in [0, NIPIS) is populated.
+ */
+static const char * const ipi_names[] = {
+ [IPI_NOP] = "ipi nop",
+ [IPI_AST] = "ipi ast",
+ [IPI_SHOOTDOWN] = "ipi shootdown",
+ [IPI_SYNCICACHE]= "ipi isync",
+ [IPI_KPREEMPT] = "ipi kpreempt",
+ [IPI_SUSPEND] = "ipi suspend",
+ [IPI_HALT] = "ipi halt",
+ [IPI_XCALL] = "ipi xcall",
+ [IPI_GENERIC] = "ipi generic",
+};
+
+/* IPI_NOP handler: deliberately empty. */
+static void
+ipi_nop(struct cpu_info *ci)
+{
+ /*
+ * This is just a reason to get an interrupt so we get
+ * kicked out of cpu_idle().
+ */
+}
+
+/*
+ * IPI_AST handler: post an AST to the lwp currently running on the
+ * target cpu so it passes through userret on its way back to userland.
+ */
+static void
+ipi_ast(struct cpu_info *ci)
+{
+ ci->ci_onproc->l_md.md_astpending = 1;
+}
+
+/*
+ * IPI_SHOOTDOWN handler: run the common pmap code that processes the
+ * TLB shootdown requests queued for this cpu.
+ */
+static void
+ipi_shootdown(struct cpu_info *ci)
+{
+ pmap_tlb_shootdown_process();
+}
+
+/*
+ * IPI_SYNCICACHE handler: record that an icache synchronization is
+ * wanted on this cpu; the pmap layer performs the actual sync.
+ */
+static inline void
+ipi_syncicache(struct cpu_info *ci)
+{
+ pmap_tlb_syncicache_wanted(ci);
+}
+
+#ifdef __HAVE_PREEMPTION
+/*
+ * IPI_KPREEMPT handler: trigger the soft interrupt that performs
+ * kernel preemption on this cpu.
+ */
+static inline void
+ipi_kpreempt(struct cpu_info *ci)
+{
+ softint_trigger(SOFTINT_KPREEMPT);
+}
+#endif
+
+/*
+ * Process cpu stop-self event.
+ * Marks this cpu in cpus_halted, raises the ipl to block further
+ * interrupts, then spins forever; never returns (__dead).
+ * XXX could maybe add/use locoresw halt function?
+ */
+static void
+ipi_halt(void)
+{
+ const u_int my_cpu = cpu_number();
+ printf("cpu%u: shutting down\n", my_cpu);
+ kcpuset_set(cpus_halted, my_cpu);
+ splhigh();
+ for (;;)
+ ;
+ /* NOTREACHED */
+}
+
+/*
+ * Dispatch every IPI whose bit is set in ipi_mask, bumping the matching
+ * per-IPI event counter for each.  Must be called from interrupt
+ * context (asserted).  IPI_HALT never returns, so any bits tested after
+ * it are only reached when IPI_HALT was not requested.
+ */
+void
+ipi_process(struct cpu_info *ci, unsigned long ipi_mask)
+{
+ KASSERT(cpu_intr_p());
+
+ if (ipi_mask & __BIT(IPI_NOP)) {
+ ci->ci_evcnt_per_ipi[IPI_NOP].ev_count++;
+ ipi_nop(ci);
+ }
+ if (ipi_mask & __BIT(IPI_AST)) {
+ ci->ci_evcnt_per_ipi[IPI_AST].ev_count++;
+ ipi_ast(ci);
+ }
+ if (ipi_mask & __BIT(IPI_SHOOTDOWN)) {
+ ci->ci_evcnt_per_ipi[IPI_SHOOTDOWN].ev_count++;
+ ipi_shootdown(ci);
+ }
+ if (ipi_mask & __BIT(IPI_SYNCICACHE)) {
+ ci->ci_evcnt_per_ipi[IPI_SYNCICACHE].ev_count++;
+ ipi_syncicache(ci);
+ }
+ if (ipi_mask & __BIT(IPI_SUSPEND)) {
+ ci->ci_evcnt_per_ipi[IPI_SUSPEND].ev_count++;
+ cpu_pause();
+ }
+ if (ipi_mask & __BIT(IPI_HALT)) {
+ ci->ci_evcnt_per_ipi[IPI_HALT].ev_count++;
+ ipi_halt();
+ }
+ if (ipi_mask & __BIT(IPI_XCALL)) {
+ ci->ci_evcnt_per_ipi[IPI_XCALL].ev_count++;
+ xc_ipi_handler();
+ }
+ if (ipi_mask & __BIT(IPI_GENERIC)) {
+ ci->ci_evcnt_per_ipi[IPI_GENERIC].ev_count++;
+ ipi_cpu_handler();
+ }
+#ifdef __HAVE_PREEMPTION
+ if (ipi_mask & __BIT(IPI_KPREEMPT)) {
+ ci->ci_evcnt_per_ipi[IPI_KPREEMPT].ev_count++;
+ ipi_kpreempt(ci);
+ }
+#endif
+}
+
+/*
+ * Attach the aggregate and per-IPI event counters for cpu 'ci'.
+ * Called once per cpu; asserts every entry of ipi_names is populated
+ * so a new IPI number cannot be added without a name.
+ */
+void
+ipi_init(struct cpu_info *ci)
+{
+ evcnt_attach_dynamic(&ci->ci_evcnt_all_ipis, EVCNT_TYPE_INTR,
+ NULL, device_xname(ci->ci_dev), "ipi");
+
+ for (size_t i = 0; i < NIPIS; i++) {
+ KASSERTMSG(ipi_names[i] != NULL, "%zu", i);
+ evcnt_attach_dynamic(&ci->ci_evcnt_per_ipi[i], EVCNT_TYPE_INTR,
+ NULL, device_xname(ci->ci_dev), ipi_names[i]);
+ }
+}
Index: src/sys/arch/riscv/riscv/riscv_tlb.c
diff -u /dev/null src/sys/arch/riscv/riscv/riscv_tlb.c:1.1
--- /dev/null Mon Jun 12 19:04:14 2023
+++ src/sys/arch/riscv/riscv/riscv_tlb.c Mon Jun 12 19:04:14 2023
@@ -0,0 +1,127 @@
+/* $NetBSD: riscv_tlb.c,v 1.1 2023/06/12 19:04:14 skrll Exp $ */
+
+/*
+ * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
+ * Nick Hudson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_riscv_debug.h"
+#include "opt_multiprocessor.h"
+
+#include <sys/cdefs.h>
+__RCSID("$NetBSD: riscv_tlb.c,v 1.1 2023/06/12 19:04:14 skrll Exp $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <uvm/uvm.h>
+
+/* Return the ASID currently programmed into the hart's ASID CSR field. */
+tlb_asid_t
+tlb_get_asid(void)
+{
+ return csr_asid_read();
+}
+
+/*
+ * Program 'asid' into the hart's ASID CSR field.  The pmap argument is
+ * part of the common uvm/pmap TLB interface but is not needed here.
+ */
+void
+tlb_set_asid(tlb_asid_t asid, struct pmap *pm)
+{
+ csr_asid_write(asid);
+}
+
+/*
+ * Flush the entire TLB: SFENCE.VMA with rs1 = rs2 = x0 orders and
+ * invalidates all address-translation cache entries, for all addresses
+ * and all address spaces.
+ */
+void
+tlb_invalidate_all(void)
+{
+ asm volatile("sfence.vma"
+ : /* output operands */
+ : /* input operands */
+ : "memory");
+}
+
+/*
+ * RISC-V has no way to target only global (G-bit) mappings, so fall
+ * back to a full flush.
+ */
+void
+tlb_invalidate_globals(void)
+{
+ tlb_invalidate_all();
+}
+
+/*
+ * Flush all entries belonging to each ASID in the inclusive range
+ * [lo, hi], one SFENCE.VMA (rs1 = x0, rs2 = asid) per ASID.
+ *
+ * NOTE(review): the 'lo++' would wrap if hi were the maximum value of
+ * tlb_asid_t; presumably callers keep hi below that — confirm.
+ */
+void
+tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
+{
+ for (; lo <= hi; lo++) {
+ asm volatile("sfence.vma zero, %[asid]"
+ : /* output operands */
+ : [asid] "r" (lo)
+ : "memory");
+ }
+}
+/*
+ * Flush the TLB entry for virtual address 'va'.  For KERNEL_PID the
+ * SFENCE.VMA omits the ASID operand, flushing 'va' across all address
+ * spaces (kernel mappings are global); otherwise only the entry for
+ * the given ASID is flushed.
+ */
+void
+tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
+{
+ if (asid == KERNEL_PID) {
+ asm volatile("sfence.vma %[va]"
+ : /* output operands */
+ : [va] "r" (va)
+ : "memory");
+ } else {
+ asm volatile("sfence.vma %[va], %[asid]"
+ : /* output operands */
+ : [va] "r" (va), [asid] "r" (asid)
+ : "memory");
+ }
+}
+
+/*
+ * "Update" the TLB entry for (va, asid).  The hardware-walked TLB
+ * cannot be loaded directly, so 'pte' and 'insert_p' are ignored: the
+ * stale entry is simply flushed (same SFENCE.VMA forms as
+ * tlb_invalidate_addr) and the hardware will refill on next access.
+ * Always reports success.
+ */
+bool
+tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
+{
+ if (asid == KERNEL_PID) {
+ asm volatile("sfence.vma %[va]"
+ : /* output operands */
+ : [va] "r" (va)
+ : "memory");
+ } else {
+ asm volatile("sfence.vma %[va], %[asid]"
+ : /* output operands */
+ : [va] "r" (va), [asid] "r" (asid)
+ : "memory");
+ }
+ return true;
+}
+
+/*
+ * Record which ASIDs are live in the TLB into the bitmap at 'ptr'.
+ * The TLB cannot be inspected, so conservatively mark every ASID as in
+ * use (memset 0xff), except bit 0 (-2UL clears it), and report
+ * PMAP_TLB_NUM_PIDS - 1 ASIDs live.  'asid_max' is unused here.
+ *
+ * NOTE(review): bit 0 presumably corresponds to KERNEL_PID, which is
+ * never recycled — confirm against the common pmap_tlb code.
+ */
+u_int
+tlb_record_asids(u_long *ptr, tlb_asid_t asid_max)
+{
+ memset(ptr, 0xff, PMAP_TLB_NUM_PIDS / NBBY);
+ ptr[0] = -2UL;
+
+ return PMAP_TLB_NUM_PIDS - 1;
+}
+
+/*
+ * Iterate over TLB entries, calling 'func' for each.  A no-op on
+ * RISC-V since the hardware-walked TLB cannot be inspected.
+ */
+void
+tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
+{
+ /* no way to view the TLB */
+}