Module Name:	src
Committed By:	skrll
Date:		Fri Oct 6 11:45:16 UTC 2023
Modified Files:
	src/sys/arch/ia64/ia64: context.S db_trace.c exception.S genassym.cf
	    interrupt.c locore.S machdep.c pal.S pmap.c process_machdep.c
	    setjmp.S sys_machdep.c syscall_stubs.S vm_machdep.c

Log Message:
Trailing whitespace.


To generate a diff of this commit:
cvs rdiff -u -r1.8 -r1.9 src/sys/arch/ia64/ia64/context.S
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/ia64/ia64/db_trace.c
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/ia64/ia64/exception.S \
    src/sys/arch/ia64/ia64/process_machdep.c \
    src/sys/arch/ia64/ia64/sys_machdep.c \
    src/sys/arch/ia64/ia64/syscall_stubs.S
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/ia64/ia64/genassym.cf
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/ia64/ia64/interrupt.c
cvs rdiff -u -r1.9 -r1.10 src/sys/arch/ia64/ia64/locore.S
cvs rdiff -u -r1.44 -r1.45 src/sys/arch/ia64/ia64/machdep.c
cvs rdiff -u -r1.1 -r1.2 src/sys/arch/ia64/ia64/pal.S \
    src/sys/arch/ia64/ia64/setjmp.S
cvs rdiff -u -r1.42 -r1.43 src/sys/arch/ia64/ia64/pmap.c
cvs rdiff -u -r1.18 -r1.19 src/sys/arch/ia64/ia64/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
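The change itself is mechanical: every hunk below deletes whitespace at
end-of-line. As an illustration only -- not necessarily how this commit
was prepared -- trailing blanks can be located and stripped with standard
tools; [[:blank:]] matches a space or a tab in both GNU and BSD grep/sed:

  # list files under the tree that still have trailing whitespace
  grep -rl '[[:blank:]]$' src/sys/arch/ia64/ia64

  # strip it in place (the -i backup-suffix syntax differs between
  # sed implementations; check sed(1) on your system)
  sed -i.bak -e 's/[[:blank:]]*$//' src/sys/arch/ia64/ia64/pmap.c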
Modified files:

Index: src/sys/arch/ia64/ia64/context.S
diff -u src/sys/arch/ia64/ia64/context.S:1.8 src/sys/arch/ia64/ia64/context.S:1.9
--- src/sys/arch/ia64/ia64/context.S:1.8	Sat Apr 8 17:38:43 2017
+++ src/sys/arch/ia64/ia64/context.S	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: context.S,v 1.8 2017/04/08 17:38:43 scole Exp $ */
+/* $NetBSD: context.S,v 1.9 2023/10/06 11:45:16 skrll Exp $ */
 
 /*
  * Copyright (c) 2003 Marcel Moolenaar
@@ -657,7 +657,7 @@ ENTRY(restore_high_fp, 1)
 	ldf.fill f32=[r32],32
 	;;
 	ldf.fill f33=[r31],32
-	ldf.fill f34=[r32],32
+	ldf.fill f34=[r32],32
 	;;
 	ldf.fill f35=[r31],32
 	ldf.fill f36=[r32],32

Index: src/sys/arch/ia64/ia64/db_trace.c
diff -u src/sys/arch/ia64/ia64/db_trace.c:1.5 src/sys/arch/ia64/ia64/db_trace.c:1.6
--- src/sys/arch/ia64/ia64/db_trace.c:1.5	Sun Jul 31 19:10:54 2016
+++ src/sys/arch/ia64/ia64/db_trace.c	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: db_trace.c,v 1.5 2016/07/31 19:10:54 dholland Exp $ */
+/* $NetBSD: db_trace.c,v 1.6 2023/10/06 11:45:16 skrll Exp $ */
 
 /* Inspired by reading alpha/db_trace.c */
 
@@ -122,7 +122,7 @@ db_stack_trace_print(db_expr_t addr, boo
 	}
 	else
 		(*pr) ("Unwind from arbitrary addresses unimplemented. \n");
-
+
 	if (trace_thread) {
 		(*pr)("trace by pid unimplemented. \n");
 
@@ -139,14 +139,14 @@ extern vsize_t ia64_unwindtablen;
 
 /* Generates initial unwind frame context based on the contents
- * of the trap frame, by consulting the Unwind library
+ * of the trap frame, by consulting the Unwind library
 * staterecord. If a register is of type enum UNSAVED, we fetch
 * the live value of the register from the trapframe.
 */
 
 void
 initunwindframe(struct unwind_frame *uwf, struct trapframe *tf)
-
+
 {
 
 	uwf->rp = tf->tf_special.rp;
@@ -165,10 +165,10 @@ initunwindframe(struct unwind_frame *uwf
 }
 
-
-/* Single step the frame backward.
+
+/* Single step the frame backward.
 * Assumes unwind_frame is setup already.
 */
@@ -185,7 +185,7 @@ rewindframe(struct unwind_frame *uwf, db
 	debug_frame_dump_XXX(uwf);
 #endif
 
-	/* Stomp on rp and pfs
+	/* Stomp on rp and pfs
	 */
 	KASSERT(ip >= kernstart);
 	patchunwindframe(uwf, ip - kernstart, kernstart);

Index: src/sys/arch/ia64/ia64/exception.S
diff -u src/sys/arch/ia64/ia64/exception.S:1.6 src/sys/arch/ia64/ia64/exception.S:1.7
--- src/sys/arch/ia64/ia64/exception.S:1.6	Sat Apr 8 17:42:47 2017
+++ src/sys/arch/ia64/ia64/exception.S	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: exception.S,v 1.6 2017/04/08 17:42:47 scole Exp $ */
+/* $NetBSD: exception.S,v 1.7 2023/10/06 11:45:16 skrll Exp $ */
 
 /*-
  * Copyright (c) 2003,2004 Marcel Moolenaar
@@ -31,7 +31,7 @@
 /* __FBSDID("$FreeBSD: releng/10.1/sys/ia64/ia64/exception.S 268200 2014-07-02 23:47:43Z marcel $"); */
 
 #include "assym.h"
-
+
 /*
  * Nested TLB restart tokens. These are used by the
 * nested TLB handler for jumping back to the code
@@ -54,7 +54,7 @@
 	.size ia64_kptdir, 8
 ia64_kptdir:	data8	0
-
+
 #ifdef XTRACE
 
 	.align 8
@@ -747,7 +747,7 @@ ENTRY_NOPROFILE(exception_restore, 0)
 {	.mmi
 	mov ar.rsc=r31		// setup for loadrs
 	mov ar.k7=r16
-	addl r29=NTLBRT_RESTORE,r0	// 22-bit restart token
+	addl r29=NTLBRT_RESTORE,r0	// 22-bit restart token
 	;;
 }
@@ -873,7 +873,7 @@ IVT_ENTRY(Instruction_TLB, 0x0400)
 	;;
 	add r21=16,r18		// tag
 	add r20=24,r18		// collision chain
-	;;
+	;;
 	ld8 r21=[r21]		// check VHPT tag
 	ld8 r20=[r20]		// bucket head
 	;;
@@ -909,7 +909,7 @@ IVT_ENTRY(Instruction_TLB, 0x0400)
 	or r21=r21,r22
 	;;
 	st8 [r20]=r21,8
-	;;
+	;;
 	ld8 r22=[r20]		// read rest of pte
 	;;
 	dep r18=0,r18,61,3	// convert vhpt ptr to physical
@@ -932,7 +932,7 @@ IVT_ENTRY(Instruction_TLB, 0x0400)
 	;;
 	itc.i r21		// and place in TLB
 	ssm psr.dt
-	;;
+	;;
 	srlz.d
 	mov pr=r17,0x1ffff	// restore predicates
 	rfi
@@ -946,7 +946,7 @@ IVT_ENTRY(Instruction_TLB, 0x0400)
 	mov pr=r17,0x1ffff	// restore predicates
 	;;
 	srlz.d
-	;;
+	;;
 	CALL(trap, 20, cr.ifa)	// Page Not Present trap
 IVT_END(Instruction_TLB)
@@ -959,7 +959,7 @@ IVT_ENTRY(Data_TLB, 0x0800)
 	;;
 	add r21=16,r18		// tag
 	add r20=24,r18		// collision chain
-	;;
+	;;
 	ld8 r21=[r21]		// check VHPT tag
 	ld8 r20=[r20]		// bucket head
 	;;
@@ -975,7 +975,7 @@ IVT_ENTRY(Data_TLB, 0x0800)
 	;;
1:	rsm psr.dt		// turn off data translations
 	dep r20=0,r20,61,3	// convert vhpt ptr to physical
-	;;
+	;;
 	srlz.d			// serialize
 	ld8 r20=[r20]		// first entry
 	;;
@@ -995,7 +995,7 @@ IVT_ENTRY(Data_TLB, 0x0800)
 	or r21=r21,r22
 	;;
 	st8 [r20]=r21,8
-	;;
+	;;
 	ld8 r22=[r20]		// read rest of pte
 	;;
 	dep r18=0,r18,61,3	// convert vhpt ptr to physical
@@ -1018,7 +1018,7 @@ IVT_ENTRY(Data_TLB, 0x0800)
 	;;
 	itc.d r21		// and place in TLB
 	ssm psr.dt
-	;;
+	;;
 	srlz.d
 	mov pr=r17,0x1ffff	// restore predicates
 	rfi
@@ -1032,7 +1032,7 @@ IVT_ENTRY(Data_TLB, 0x0800)
 	mov pr=r17,0x1ffff	// restore predicates
 	;;
 	srlz.d
-	;;
+	;;
 	CALL(trap, 20, cr.ifa)	// Page Not Present trap
IVT_END(Data_TLB)
@@ -1262,7 +1262,7 @@ IVT_ENTRY(Dirty_Bit, 0x2000)
 	;;
 	ttag r19=r16
 	add r20=24,r18		// collision chain
-	;;
+	;;
 	ld8 r20=[r20]		// bucket head
 	;;
 	rsm psr.dt		// turn off data translations
@@ -1287,7 +1287,7 @@ IVT_ENTRY(Dirty_Bit, 0x2000)
 	or r21=r22,r21		// set dirty & access bit
 	;;
 	st8 [r20]=r21,8		// store back
-	;;
+	;;
 	ld8 r22=[r20]		// read rest of pte
 	;;
 	dep r18=0,r18,61,3	// convert vhpt ptr to physical
@@ -1310,7 +1310,7 @@ IVT_ENTRY(Dirty_Bit, 0x2000)
 	;;
 	itc.d r21		// and place in TLB
 	ssm psr.dt
-	;;
+	;;
 	srlz.d
 	mov pr=r17,0x1ffff	// restore predicates
 	rfi
@@ -1336,7 +1336,7 @@ IVT_ENTRY(Instruction_Access_Bit, 0x2400
 	;;
 	ttag r19=r16
 	add r20=24,r18		// collision chain
-	;;
+	;;
 	ld8 r20=[r20]		// bucket head
 	;;
 	rsm psr.dt		// turn off data translations
@@ -1384,7 +1384,7 @@ IVT_ENTRY(Instruction_Access_Bit, 0x2400
 	;;
 	itc.i r21		// and place in TLB
 	ssm psr.dt
-	;;
+	;;
 	srlz.d
 	mov pr=r17,0x1ffff	// restore predicates
 	rfi			// walker will retry the access
@@ -1435,7 +1435,7 @@ IVT_ENTRY(Data_Access_Bit, 0x2800)
 	or r21=r22,r21		// set accessed bit
 	;;
 	st8 [r20]=r21,8		// store back
-	;;
+	;;
 	ld8 r22=[r20]		// read rest of pte
 	;;
 	dep r18=0,r18,61,3	// convert vhpt ptr to physical
@@ -1458,7 +1458,7 @@ IVT_ENTRY(Data_Access_Bit, 0x2800)
 	;;
 	itc.d r21		// and place in TLB
 	ssm psr.dt
-	;;
+	;;
 	srlz.d
 	mov pr=r17,0x1ffff	// restore predicates
 	rfi			// walker will retry the access

Index: src/sys/arch/ia64/ia64/process_machdep.c
diff -u src/sys/arch/ia64/ia64/process_machdep.c:1.6 src/sys/arch/ia64/ia64/process_machdep.c:1.7
--- src/sys/arch/ia64/ia64/process_machdep.c:1.6	Sat Jan 4 00:10:02 2014
+++ src/sys/arch/ia64/ia64/process_machdep.c	Fri Oct 6 11:45:16 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: process_machdep.c,v 1.6 2014/01/04 00:10:02 dsl Exp $ */
+/* $NetBSD: process_machdep.c,v 1.7 2023/10/06 11:45:16 skrll Exp $ */
 
 /*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 *
- * Author:
+ * Author:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.6 2014/01/04 00:10:02 dsl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.7 2023/10/06 11:45:16 skrll Exp $");
 
 #include <sys/param.h>
 #include <sys/ptrace.h>

Index: src/sys/arch/ia64/ia64/sys_machdep.c
diff -u src/sys/arch/ia64/ia64/sys_machdep.c:1.6 src/sys/arch/ia64/ia64/sys_machdep.c:1.7
--- src/sys/arch/ia64/ia64/sys_machdep.c:1.6	Mon Jul 20 04:41:37 2009
+++ src/sys/arch/ia64/ia64/sys_machdep.c	Fri Oct 6 11:45:16 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: sys_machdep.c,v 1.6 2009/07/20 04:41:37 kiyohara Exp $ */
+/* $NetBSD: sys_machdep.c,v 1.7 2023/10/06 11:45:16 skrll Exp $ */
 
 /*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 *
- * Author:
+ * Author:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>	/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.6 2009/07/20 04:41:37 kiyohara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.7 2023/10/06 11:45:16 skrll Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>

Index: src/sys/arch/ia64/ia64/syscall_stubs.S
diff -u src/sys/arch/ia64/ia64/syscall_stubs.S:1.6 src/sys/arch/ia64/ia64/syscall_stubs.S:1.7
--- src/sys/arch/ia64/ia64/syscall_stubs.S:1.6	Thu May 2 17:34:01 2019
+++ src/sys/arch/ia64/ia64/syscall_stubs.S	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: syscall_stubs.S,v 1.6 2019/05/02 17:34:01 scole Exp $ */
+/* $NetBSD: syscall_stubs.S,v 1.7 2023/10/06 11:45:16 skrll Exp $ */
 
 /*-
  * Copyright (c) 2002, 2003 Marcel Moolenaar
@@ -74,10 +74,10 @@
 * information they need and we have the freedom to move code around.
 */
 
-/* XXX fix */
-#define syscall 0
+/* XXX fix */
+#define syscall 0
 #define SYS_sigreturn 0
-
+
 	.section .text.gateway, "ax"
 	.align	PAGE_SIZE
 	.global ia64_gateway_page
@@ -222,12 +222,12 @@ ENTRY_NOPROFILE(epc_sigtramp, 0)
 	;;
 }
 	add out0=16,sp
-/* XXX fix */
+/* XXX fix */
 #if 0
 	CALLSYS_NOERROR(sigreturn)
 	mov out0=ret0
 	CALLSYS_NOERROR(exit)
-#endif
+#endif
 END(epc_sigtramp)
 
 	.align	PAGE_SIZE
@@ -546,7 +546,7 @@ epc_syscall_setup_ia32:
 	mov r13=r0
 	;;
 }
-
+
 	ld8 r24=[r14],32
 	ld8 r27=[r15],16
 	;;

Index: src/sys/arch/ia64/ia64/genassym.cf
diff -u src/sys/arch/ia64/ia64/genassym.cf:1.16 src/sys/arch/ia64/ia64/genassym.cf:1.17
--- src/sys/arch/ia64/ia64/genassym.cf:1.16	Thu May 2 17:31:56 2019
+++ src/sys/arch/ia64/ia64/genassym.cf	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.16 2019/05/02 17:31:56 scole Exp $
+# $NetBSD: genassym.cf,v 1.17 2023/10/06 11:45:16 skrll Exp $
 
 #
 # Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -119,7 +119,7 @@ define SIZEOF_SPECIAL sizeof(struct _sp
 define MC_SPECIAL	offsetof(struct __mcontext, mc_special)
 define UC_MCONTEXT	offsetof(struct __ucontext, uc_mcontext)
 
-# general constants
+# general constants
 define VM_MAX_ADDRESS	VM_MAX_ADDRESS
 
 # Important offsets into the user struct & associated constants
@@ -127,5 +127,5 @@ define UPAGES		UPAGES
 
 define FRAME_SYSCALL	FRAME_SYSCALL
 
-define PC_CURLWP	offsetof(struct cpu_info, ci_curlwp)
-define PCB_ONFAULT	offsetof(struct pcb, pcb_onfault)
+define PC_CURLWP	offsetof(struct cpu_info, ci_curlwp)
+define PCB_ONFAULT	offsetof(struct pcb, pcb_onfault)

Index: src/sys/arch/ia64/ia64/interrupt.c
diff -u src/sys/arch/ia64/ia64/interrupt.c:1.11 src/sys/arch/ia64/ia64/interrupt.c:1.12
--- src/sys/arch/ia64/ia64/interrupt.c:1.11	Sat Nov 21 20:50:08 2020
+++ src/sys/arch/ia64/ia64/interrupt.c	Fri Oct 6 11:45:16 2023
@@ -1,21 +1,21 @@
-/* $NetBSD: interrupt.c,v 1.11 2020/11/21 20:50:08 thorpej Exp $ */
+/* $NetBSD: interrupt.c,v 1.12 2023/10/06 11:45:16 skrll Exp $ */
 
 /*-
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Keith Bostic, Chris G. Demetriou
- *
+ *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  software.distribut...@cs.cmu.edu
@@ -33,7 +33,7 @@
 */
 
 #include <sys/cdefs.h>	/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.11 2020/11/21 20:50:08 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.12 2023/10/06 11:45:16 skrll Exp $");
 
 #include "opt_ddb.h"

Index: src/sys/arch/ia64/ia64/locore.S
diff -u src/sys/arch/ia64/ia64/locore.S:1.9 src/sys/arch/ia64/ia64/locore.S:1.10
--- src/sys/arch/ia64/ia64/locore.S:1.9	Tue Nov 20 20:36:24 2018
+++ src/sys/arch/ia64/ia64/locore.S	Fri Oct 6 11:45:16 2023
@@ -1,5 +1,5 @@
-/* $NetBSD: locore.S,v 1.9 2018/11/20 20:36:24 scole Exp $ */
-
+/* $NetBSD: locore.S,v 1.10 2023/10/06 11:45:16 skrll Exp $ */
+
 /*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
@@ -42,7 +42,7 @@ kstack:	.space KSTACK_PAGES * PAGE_SIZE
 	.global kstack_top
 kstack_top:
-
+
 	.text
 
 /*
@@ -81,7 +81,7 @@ ENTRY_NOPROFILE(start, 1)
 	movl r16=pa_bootinfo
 	;;
 }
-{ .mmi
+{ .mmi
 	st8 [r16]=r8		// save the PA of the bootinfo block
 	loadrs			// invalidate regs
 	mov r17=IA64_DCR_DEFAULT
@@ -94,18 +94,18 @@ ENTRY_NOPROFILE(start, 1)
 	;;
 }
 { .mmi
-	srlz.d
+	srlz.d
 	alloc r16=ar.pfs,0,0,1,0
-	mov out0=r0		// we are linked at the right address
+	mov out0=r0		// we are linked at the right address
 	;;			// we just need to process fptrs
 }
-#if 0 /* XXX: Look into relocs */
+#if 0 /* XXX: Look into relocs */
 { .mib
 	nop 0
 	nop 0
 	br.call.sptk.many rp=_reloc
 }
-#endif
+#endif
 { .mib
 	nop 0
 	nop 0
@@ -261,7 +261,7 @@ END(os_boot_rendez)
 /*STATIC_ENTRY(_reloc, 1)
 	alloc loc0=ar.pfs,1,2,0,0
 	mov loc1=rp
-	;;
+	;;
 	movl r15=@gprel(_DYNAMIC)	// find _DYNAMIC etc.
 	movl r2=@gprel(fptr_storage)
 	movl r3=@gprel(fptr_storage_end)
@@ -276,34 +276,34 @@ END(os_boot_rendez)
 	;;
 	cmp.eq p6,p0=DT_NULL,r16	// done?
(p6)	br.cond.dpnt.few 2f
-	;;
+	;;
 	cmp.eq p6,p0=DT_RELA,r16
-	;;
+	;;
(p6)	add r18=r17,in0		// found rela section
-	;;
+	;;
 	cmp.eq p6,p0=DT_RELASZ,r16
-	;;
+	;;
(p6)	mov r19=r17		// found rela size
-	;;
+	;;
 	cmp.eq p6,p0=DT_SYMTAB,r16
-	;;
+	;;
(p6)	add r20=r17,in0		// found symbol table
-	;;
+	;;
(p6)	setf.sig f8=r20
-	;;
+	;;
 	cmp.eq p6,p0=DT_SYMENT,r16
-	;;
+	;;
(p6)	setf.sig f9=r17		// found symbol entry size
-	;;
+	;;
 	cmp.eq p6,p0=DT_RELAENT,r16
-	;;
+	;;
(p6)	mov r22=r17		// found rela entry size
 	;;
 	br.sptk.few 1b
-
-2:
+
+2:
 	ld8 r15=[r18],8		// read r_offset
-	;;
+	;;
 	ld8 r16=[r18],8		// read r_info
 	add r15=r15,in0		// relocate r_offset
 	;;
@@ -320,7 +320,7 @@ END(os_boot_rendez)
 	;;
 	extr.u r16=r16,32,32	// ELF64_R_SYM(r16)
-	;;
+	;;
 	setf.sig f10=r16	// so we can multiply
 	;;
 	xma.lu f10=f10,f9,f8	// f10=symtab + r_sym*syment

Index: src/sys/arch/ia64/ia64/machdep.c
diff -u src/sys/arch/ia64/ia64/machdep.c:1.44 src/sys/arch/ia64/ia64/machdep.c:1.45
--- src/sys/arch/ia64/ia64/machdep.c:1.44	Thu Feb 23 14:55:47 2023
+++ src/sys/arch/ia64/ia64/machdep.c	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.44 2023/02/23 14:55:47 riastradh Exp $ */
+/* $NetBSD: machdep.c,v 1.45 2023/10/06 11:45:16 skrll Exp $ */
 
 /*-
  * Copyright (c) 2003,2004 Marcel Moolenaar
@@ -124,7 +124,7 @@
 #else
 #define DPRINTF(fmt, args...)	((void)0)
 #endif
-
+
 /* the following is used externally (sysctl_hw) */
 char	machine[] = MACHINE;		/* from <machine/param.h> */
 char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
@@ -273,7 +273,7 @@ map_vhpt(uintptr_t vhpt)
	 * exactly sure why this is needed with GCC 7.4
	 */
 	register uint64_t log2size = pmap_vhpt_log2size << 2;
-
+
 	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RW;
 	pte |= vhpt & PTE_PPN_MASK;
@@ -620,11 +620,11 @@ ia64_init(void)
 
 	lwp0.l_md.user_stack = NULL;
 	lwp0.l_md.user_stack_size = 0;
-
+
 	pcb0 = lwp_getpcb(&lwp0);
 	pcb0->pcb_special.sp = v + UAREA_SP_OFFSET;
 	pcb0->pcb_special.bspstore = v + UAREA_BSPSTORE_OFFSET;
-
+
	/*
	 * Setup global data for the bootstrap cpu.
	 */
@@ -672,7 +672,7 @@ ia64_init(void)
	 * Initialize the virtual memory system.
	 */
 	pmap_bootstrap();
-
+
	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
@@ -688,7 +688,7 @@ ia64_init(void)
 
 	ret.bspstore = pcb0->pcb_special.bspstore;
 	ret.sp = pcb0->pcb_special.sp;
-
+
 	return (ret);
 }

Index: src/sys/arch/ia64/ia64/pal.S
diff -u src/sys/arch/ia64/ia64/pal.S:1.1 src/sys/arch/ia64/ia64/pal.S:1.2
--- src/sys/arch/ia64/ia64/pal.S:1.1	Fri Apr 7 14:21:18 2006
+++ src/sys/arch/ia64/ia64/pal.S	Fri Oct 6 11:45:16 2023
@@ -1,5 +1,5 @@
-/* $NetBSD: pal.S,v 1.1 2006/04/07 14:21:18 cherry Exp $ */
-
+/* $NetBSD: pal.S,v 1.2 2023/10/06 11:45:16 skrll Exp $ */
+
 /*-
 * Copyright (c) 2000-2001 Doug Rabson
 * All rights reserved.
@@ -40,7 +40,7 @@ ia64_pal_entry:	.quad 0
 * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
 */
 ENTRY(ia64_call_pal_static, 4)
-
+
	.regstk	4,5,0,0
palret	= loc0
entry	= loc1
@@ -49,7 +49,7 @@ pfssave = loc3
psrsave	= loc4
 
 	alloc pfssave=ar.pfs,4,5,0,0
-	;;
+	;;
 	mov rpsave=rp
 
 	movl entry=@gprel(ia64_pal_entry)
@@ -79,15 +79,15 @@ psrsave = loc4
 	br.ret.sptk rp
END(ia64_call_pal_static)
-
+
 #ifdef _KERNEL
-
+
 /*
 * struct ia64_pal_result ia64_call_pal_static_physical(u_int64_t proc,
 *	u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
 */
ENTRY(ia64_call_pal_static_physical, 4)
-
+
	.regstk	4,5,0,0
palret	= loc0
entry	= loc1
@@ -96,7 +96,7 @@ pfssave = loc3
psrsave	= loc4
 
 	alloc pfssave=ar.pfs,4,5,0,0
-	;;
+	;;
 	mov rpsave=rp
 
 	movl entry=@gprel(ia64_pal_entry)
@@ -124,22 +124,22 @@ psrsave = loc4
2:	mov r14=psrsave
 	;;
 	br.call.sptk.many rp=ia64_change_mode
-	;;
+	;;
 	mov rp=rpsave
 	mov ar.pfs=pfssave
 	;;
 	br.ret.sptk rp
END(ia64_call_pal_static_physical)
-
+
 #endif
-
+
 /*
 * struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc,
 *	u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
 */
ENTRY(ia64_call_pal_stacked, 4)
-
+
	.regstk	4,4,4,0
entry	= loc0
rpsave	= loc1
@@ -147,7 +147,7 @@ pfssave = loc2
psrsave	= loc3
 
 	alloc pfssave=ar.pfs,4,4,4,0
-	;;
+	;;
 	mov rpsave=rp
 	movl entry=@gprel(ia64_pal_entry)
 	;;
@@ -174,15 +174,15 @@ psrsave = loc3
 	br.ret.sptk rp
END(ia64_call_pal_stacked)
-
+
 #ifdef _KERNEL
-
+
 /*
 * struct ia64_pal_result ia64_call_pal_stacked_physical(u_int64_t proc,
 *	u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
 */
ENTRY(ia64_call_pal_stacked_physical, 4)
-
+
	.regstk	4,4,4,0
entry	= loc0
rpsave	= loc1
@@ -190,7 +190,7 @@ pfssave = loc2
psrsave	= loc3
 
 	alloc pfssave=ar.pfs,4,4,4,0
-	;;
+	;;
 	mov rpsave=rp
 	movl entry=@gprel(ia64_pal_entry)
 	;;
@@ -214,7 +214,7 @@ psrsave = loc3
 	mov r14=psrsave
 	;;
 	br.call.sptk.many rp=ia64_change_mode
-	;;
+	;;
 	mov rp=rpsave
 	mov ar.pfs=pfssave
 	;;

Index: src/sys/arch/ia64/ia64/setjmp.S
diff -u src/sys/arch/ia64/ia64/setjmp.S:1.1 src/sys/arch/ia64/ia64/setjmp.S:1.2
--- src/sys/arch/ia64/ia64/setjmp.S:1.1	Fri Apr 7 14:21:18 2006
+++ src/sys/arch/ia64/ia64/setjmp.S	Fri Oct 6 11:45:16 2023
@@ -3,28 +3,28 @@
 // Copyright (c) 1999, 2000
 // Intel Corporation.
 // All rights reserved.
-//
+//
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
-//
+//
 // 1. Redistributions of source code must retain the above copyright
 //    notice, this list of conditions and the following disclaimer.
-//
+//
 // 2. Redistributions in binary form must reproduce the above copyright
 //    notice, this list of conditions and the following disclaimer in the
 //    documentation and/or other materials provided with the distribution.
-//
+//
 // 3. All advertising materials mentioning features or use of this software
 //    must display the following acknowledgement:
-//
+//
 //    This product includes software developed by Intel Corporation and
 //    its contributors.
-//
+//
 // 4. Neither the name of Intel Corporation or its contributors may be
 //    used to endorse or promote products derived from this software
 //    without specific prior written permission.
-//
+//
 // THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION AND CONTRIBUTORS ``AS IS''
 // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -36,7 +36,7 @@
 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 // THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
 //
 //
@@ -72,7 +72,7 @@ ENTRY(setjmp, 1)
 	//	Make sure buffer is aligned at 16byte boundary
 	//
 	add r10 = -0x10,r0 ;;	// mask the lower 4 bits
-	and r32 = r32, r10;;
+	and r32 = r32, r10;;
 	add r32 = 0x10, r32;;	// move to next 16 byte boundary
 
 	add r10 = J_PREDS, r32	// skip Unats & pfs save area
@@ -94,7 +94,7 @@ ENTRY(setjmp, 1)
 	st8 [r11] = r2, J_R4-J_BSP
 	;;
 	st8 [r10] = r16, J_R5-J_LC
-	st8 [r32] = r14, J_NATS	// Note: Unat at the
+	st8 [r32] = r14, J_NATS	// Note: Unat at the
				// beginning of the save area
 	mov r15 = ar.pfs
 	;;
@@ -103,11 +103,11 @@ ENTRY(setjmp, 1)
 	//
 	st8.spill [r11] = r4, J_R6-J_R4
 	;;
-	st8.spill [r10] = r5, J_R7-J_R5
+	st8.spill [r10] = r5, J_R7-J_R5
 	;;
 	st8.spill [r11] = r6, J_SP-J_R6
 	;;
-	st8.spill [r10] = r7, J_F3-J_R7
+	st8.spill [r10] = r7, J_F3-J_R7
 	;;
 	st8.spill [r11] = sp, J_F2-J_SP
 	;;
@@ -120,34 +120,34 @@ ENTRY(setjmp, 1)
 	;;
 	st8 [r32] = r15		// save pfs
 	//
-	// save floating registers
+	// save floating registers
 	//
 	stf.spill [r11] = f2, J_F4-J_F2
-	stf.spill [r10] = f3, J_F5-J_F3
+	stf.spill [r10] = f3, J_F5-J_F3
 	;;
 	stf.spill [r11] = f4, J_F16-J_F4
-	stf.spill [r10] = f5, J_F17-J_F5
+	stf.spill [r10] = f5, J_F17-J_F5
 	;;
 	stf.spill [r11] = f16, J_F18-J_F16
-	stf.spill [r10] = f17, J_F19-J_F17
+	stf.spill [r10] = f17, J_F19-J_F17
 	;;
 	stf.spill [r11] = f18, J_F20-J_F18
-	stf.spill [r10] = f19, J_F21-J_F19
+	stf.spill [r10] = f19, J_F21-J_F19
 	;;
 	stf.spill [r11] = f20, J_F22-J_F20
-	stf.spill [r10] = f21, J_F23-J_F21
+	stf.spill [r10] = f21, J_F23-J_F21
 	;;
 	stf.spill [r11] = f22, J_F24-J_F22
-	stf.spill [r10] = f23, J_F25-J_F23
+	stf.spill [r10] = f23, J_F25-J_F23
 	;;
 	stf.spill [r11] = f24, J_F26-J_F24
-	stf.spill [r10] = f25, J_F27-J_F25
+	stf.spill [r10] = f25, J_F27-J_F25
 	;;
 	stf.spill [r11] = f26, J_F28-J_F26
-	stf.spill [r10] = f27, J_F29-J_F27
+	stf.spill [r10] = f27, J_F29-J_F27
 	;;
 	stf.spill [r11] = f28, J_F30-J_F28
-	stf.spill [r10] = f29, J_F31-J_F29
+	stf.spill [r10] = f29, J_F31-J_F29
 	;;
 	stf.spill [r11] = f30, J_FPSR-J_F30
 	stf.spill [r10] = f31, J_B0-J_F31	// size of f31 + fpsr
@@ -155,21 +155,21 @@ ENTRY(setjmp, 1)
 	// save FPSR register & branch registers
 	//
 	mov r2 = ar.fpsr	// save fpsr register
-	mov r3 = b0
+	mov r3 = b0
 	;;
 	st8 [r11] = r2, J_B1-J_FPSR
 	st8 [r10] = r3, J_B2-J_B0
 	mov r2 = b1
-	mov r3 = b2
+	mov r3 = b2
 	;;
 	st8 [r11] = r2, J_B3-J_B1
 	st8 [r10] = r3, J_B4-J_B2
 	mov r2 = b3
-	mov r3 = b4
+	mov r3 = b4
 	;;
 	st8 [r11] = r2, J_B5-J_B3
 	st8 [r10] = r3
-	mov r2 = b5
+	mov r2 = b5
 	;;
 	st8 [r11] = r2
 	;;
@@ -201,7 +201,7 @@ ENTRY(longjmp, 2)
 	//	Make sure buffer is aligned at 16byte boundary
 	//
 	add r10 = -0x10,r0 ;;	// mask the lower 4 bits
-	and r32 = r32, r10;;
+	and r32 = r32, r10;;
 	add r32 = 0x10, r32;;	// move to next 16 byte boundary
 
 	//
@@ -212,7 +212,7 @@ ENTRY(longjmp, 2)
 	//
 	// get immediate context
 	//
-	mov r14 = ar.rsc	// get user RSC conf
+	mov r14 = ar.rsc	// get user RSC conf
 	add r10 = J_PFS, r32	// get address of pfs
 	add r11 = J_NATS, r32
 	;;
@@ -222,10 +222,10 @@ ENTRY(longjmp, 2)
 	mov ar.unat = r2
 	;;
 	ld8 r16 = [r10], J_PREDS-J_BSP	// get backing store pointer
-	mov ar.rsc = r0		// put RSE in enforced lazy
+	mov ar.rsc = r0		// put RSE in enforced lazy
 	mov ar.pfs = r15
 	;;
-
+
 	//
 	// while returning from longjmp the BSPSTORE and BSP needs to be
 	// same and discard all the registers allocated after we did
@@ -234,7 +234,7 @@ ENTRY(longjmp, 2)
 	//
 	mov r17 = ar.bspstore	// get current BSPSTORE
 	;;
-	cmp.ltu p6,p7 = r17, r16	// is it less than BSP of
+	cmp.ltu p6,p7 = r17, r16	// is it less than BSP of
(p6)	br.spnt.few .flush_rse
 	mov r19 = ar.rnat	// get current RNAT
 	;;
@@ -252,14 +252,14 @@ ENTRY(longjmp, 2)
 	// check if RNAT is saved between saved BSP and curr BSPSTORE
 	//
 	mov r18 = 0x3f
-	;;
+	;;
 	dep r18 = r18,r16,3,6	// get RNAT address
 	;;
 	cmp.ltu p8,p9 = r18, r17	// RNAT saved on RSE
 	;;
(p8)	ld8 r19 = [r18]		// get RNAT from RSE
 	;;
-	mov ar.bspstore = r16	// set new BSPSTORE
+	mov ar.bspstore = r16	// set new BSPSTORE
 	;;
 	mov ar.rnat = r19	// restore RNAT
 	mov ar.rsc = r14	// restore RSC conf
@@ -275,20 +275,20 @@ ENTRY(longjmp, 2)
 	//
 	ld8.fill r4 = [r11], J_R6-J_R4
 	;;
-	ld8.fill r5 = [r10], J_R7-J_R5
+	ld8.fill r5 = [r10], J_R7-J_R5
 	ld8.fill r6 = [r11], J_SP-J_R6
 	;;
 	ld8.fill r7 = [r10], J_F2-J_R7
 	ld8.fill sp = [r11], J_F3-J_SP
 	;;
 	//
-	// restore floating registers
+	// restore floating registers
 	//
 	ldf.fill f2 = [r10], J_F4-J_F2
-	ldf.fill f3 = [r11], J_F5-J_F3
+	ldf.fill f3 = [r11], J_F5-J_F3
 	;;
 	ldf.fill f4 = [r10], J_F16-J_F4
-	ldf.fill f5 = [r11], J_F17-J_F5
+	ldf.fill f5 = [r11], J_F17-J_F5
 	;;
 	ldf.fill f16 = [r10], J_F18-J_F16
 	ldf.fill f17 = [r11], J_F19-J_F17
@@ -300,7 +300,7 @@ ENTRY(longjmp, 2)
 	ldf.fill f21 = [r11], J_F23-J_F21
 	;;
 	ldf.fill f22 = [r10], J_F24-J_F22
-	ldf.fill f23 = [r11], J_F25-J_F23
+	ldf.fill f23 = [r11], J_F25-J_F23
 	;;
 	ldf.fill f24 = [r10], J_F26-J_F24
 	ldf.fill f25 = [r11], J_F27-J_F25
@@ -309,7 +309,7 @@ ENTRY(longjmp, 2)
 	ldf.fill f27 = [r11], J_F29-J_F27
 	;;
 	ldf.fill f28 = [r10], J_F30-J_F28
-	ldf.fill f29 = [r11], J_F31-J_F29
+	ldf.fill f29 = [r11], J_F31-J_F29
 	;;
 	ldf.fill f30 = [r10], J_FPSR-J_F30
 	ldf.fill f31 = [r11], J_B0-J_F31 ;;
@@ -331,7 +331,7 @@ ENTRY(longjmp, 2)
 	ld8 r3 = [r11]
 	;;
 	mov b3 = r2
-	mov b4 = r3
+	mov b4 = r3
 	ld8 r2 = [r10]
 	ld8 r21 = [r32]		// get user unat
 	;;

Index: src/sys/arch/ia64/ia64/pmap.c
diff -u src/sys/arch/ia64/ia64/pmap.c:1.42 src/sys/arch/ia64/ia64/pmap.c:1.43
--- src/sys/arch/ia64/ia64/pmap.c:1.42	Sat Apr 9 23:38:32 2022
+++ src/sys/arch/ia64/ia64/pmap.c	Fri Oct 6 11:45:16 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.42 2022/04/09 23:38:32 riastradh Exp $ */
+/* $NetBSD: pmap.c,v 1.43 2023/10/06 11:45:16 skrll Exp $ */
 
 /*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -81,7 +81,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.42 2022/04/09 23:38:32 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.43 2023/10/06 11:45:16 skrll Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -450,7 +450,7 @@ pmap_bootstrap(void)
 	memset((void *)ia64_kptdir, 0, PAGE_SIZE);
 	nkpt = 0;
 	kernel_vm_end = VM_INIT_KERNEL_ADDRESS;
-
+
	/*
	 * Determine a valid (mappable) VHPT size.
	 */
@@ -466,7 +466,7 @@ pmap_bootstrap(void)
 	size = 1UL << pmap_vhpt_log2size;
 	/* XXX add some retries here */
 	base = pmap_steal_vhpt_memory(size);
-
+
 	curcpu()->ci_vhpt = base;
 
 	if (base == 0)
@@ -493,7 +493,7 @@ pmap_bootstrap(void)
	virtual_avail = VM_INIT_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	*/
-
+
	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
@@ -548,18 +548,18 @@ pmap_invalidate_page(vaddr_t va)
 	u_int vhpt_ofs;
 	struct cpu_info *ci;
 	CPU_INFO_ITERATOR cii;
-
+
 	critical_enter();
 
 	vhpt_ofs = ia64_thash(va) - curcpu()->ci_vhpt;
-
+
 	tag = ia64_ttag(va);
 	for (CPU_INFO_FOREACH(cii,ci)) {
 		pte = (struct ia64_lpte *)(ci->ci_vhpt + vhpt_ofs);
		atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
 	}
-
+
 	mutex_spin_enter(&pmap_ptc_mutex);
 
 	ia64_ptc_ga(va, PAGE_SHIFT << 2);
@@ -567,7 +567,7 @@ pmap_invalidate_page(vaddr_t va)
 	ia64_srlz_i();
 
 	mutex_spin_exit(&pmap_ptc_mutex);
-
+
 	ia64_invala();
 
 	critical_exit();
@@ -580,7 +580,7 @@ pmap_invalidate_all(void)
 	int i, j;
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
+
 	addr = pmap_ptc_e_base;
 	for (i = 0; i < pmap_ptc_e_count1; i++) {
 		for (j = 0; j < pmap_ptc_e_count2; j++) {
@@ -599,7 +599,7 @@ pmap_allocate_rid(void)
 	int rid;
 
 	mutex_enter(&pmap_ridmutex);
-
+
 	if (pmap_ridcount == pmap_ridmax)
 		panic("pmap_allocate_rid: All Region IDs used");
@@ -622,7 +622,7 @@ pmap_allocate_rid(void)
 	pmap_ridcount++;
 
 	mutex_exit(&pmap_ridmutex);
-
+
 	return rid;
 }
@@ -853,7 +853,7 @@ free_pv_chunk(struct pv_chunk *pc)
 	/* XXX might to need locks/other checks here, uvm_unwire, pmap_kremove... */
 	uvm_pagefree(m);
-#endif
+#endif
 	vm_page_free1(m);
 }
@@ -901,7 +901,7 @@ retry:
 	/* No free items, allocate another chunk */
 	m = vm_page_alloc1();
-
+
 	if (m == NULL) {
 		if (try) {
 			pv_entry_count--;
@@ -912,7 +912,7 @@ retry:
 		if (m == NULL)
 			goto retry;
 	}
-
+
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
 	pc = (struct pv_chunk *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
@@ -1053,7 +1053,7 @@ pmap_remove_entry(pmap_t pmap, struct vm
 	KASSERT(rw_write_held(&pvh_global_lock));
 	if (!pv) {
 		TAILQ_FOREACH(pv, &m->mdpage.pv_list, pv_list) {
-			if (pmap == PV_PMAP(pv) && va == pv->pv_va)
+			if (pmap == PV_PMAP(pv) && va == pv->pv_va)
				break;
 		}
 	}
@@ -1116,12 +1116,12 @@ pmap_find_kpte(vaddr_t va)
	    KPTE_DIR0_INDEX(va), KPTE_DIR1_INDEX(va), KPTE_PTE_INDEX(va), 0);
 	UVMHIST_LOG(maphist, "(dir1=%p, leaf=%p ret=%p)", dir1, leaf,
	    &leaf[KPTE_PTE_INDEX(va)], 0);
-
+
 	return (&leaf[KPTE_PTE_INDEX(va)]);
 }
 
 /*
- * Find a pte suitable for mapping a user-space address. If one exists
+ * Find a pte suitable for mapping a user-space address. If one exists
 * in the VHPT, that one will be returned, otherwise a new pte is
 * allocated.
 */
@@ -1224,7 +1224,7 @@ pmap_remove_pte(pmap_t pmap, struct ia64
	 */
 	error = pmap_remove_vhpt(va);
 	KASSERTMSG(error == 0, "%s: pmap_remove_vhpt returned %d",__func__, error);
-
+
 	pmap_invalidate_page(va);
 
 	if (pmap_wired(pte))
@@ -1256,25 +1256,25 @@ void
 pmap_init(void)
 {
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
+
 	pmap_pool_cache = pool_cache_init(sizeof(struct pmap), 0, 0, 0,
	    "pmap_pool_cache", NULL, IPL_VM, NULL, NULL, NULL);
 	if (pmap_pool_cache == NULL)
		panic("%s cannot allocate pmap pool", __func__);
-
+
 	pte_pool_cache = pool_cache_init(sizeof(struct ia64_lpte), 0, 0, 0,
	    "pte_pool_cache", NULL, IPL_VM, NULL, NULL, NULL);
 	if (pte_pool_cache == NULL)
		panic("%s cannot allocate pte pool", __func__);
-
+
 	pmap_initialized = true;
 
 #if DEBUG
 	if (0)
		pmap_testout();
-#endif
+#endif
 }
@@ -1383,7 +1383,7 @@ pmap_steal_vhpt_memory(vsize_t size)
 		if (uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm) || /* XXX: ??? */
		    uvm_physseg_get_avail_start(upm) >= uvm_physseg_get_avail_end(upm))
			continue;
-
+
		/* Break off a VHPT sized, aligned chunk off this segment. */
 
 		start1 = uvm_physseg_get_avail_start(upm);
@@ -1484,7 +1484,7 @@ pmap_create(void)
 
 	if (pmap == NULL)
		panic("%s no pool", __func__);
-
+
 	PMAP_LOCK_INIT(pmap);
 
 	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
@@ -1498,7 +1498,7 @@ pmap_create(void)
 	pmap->pm_refcount = 1;
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist, "(pm=%p)", pmap, 0, 0, 0);
+	UVMHIST_LOG(maphist, "(pm=%p)", pmap, 0, 0, 0);
 
 	return pmap;
 }
@@ -1535,7 +1535,7 @@ pmap_destroy(pmap_t pmap)
	/*PMAP_UNLOCK(pmap);*/ /* XXX hmm */
 	PMAP_LOCK_DESTROY(pmap);
-
+
 	pool_cache_put(pmap_pool_cache, pmap);
 }
@@ -1588,7 +1588,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	paddr_t pa;
 	vaddr_t va;
 #endif
-
+
	/* XXX this function may still need serious fixin' */
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(va=%#lx, nkpt=%ld, kvm_end=%#lx before)",
	    maxkvaddr, nkpt, kernel_vm_end, 0);
@@ -1609,7 +1609,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
			/* FreeBSD does it this way... */
			nkpg = vm_page_alloc(NULL, nkpt++, VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
			pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
-#endif
+#endif
			pg = vm_page_alloc1();
			if (!pg)
				panic("%s: cannot add dir1 page", __func__);
@@ -1618,7 +1618,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 #if 0
			dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
			bzero(dir1, PAGE_SIZE);
-#endif
+#endif
			dir1 = (struct ia64_lpte **)pmap_page_to_va(pg);
 
			ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
@@ -1627,15 +1627,15 @@ pmap_growkernel(vaddr_t maxkvaddr)
 #if 0
		nkpg = vm_page_alloc(NULL, nkpt++, VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
-#endif
+#endif
		pg = vm_page_alloc1();
		if (!pg)
			panic("%s: cannot add PTE page", __func__);
		nkpt++;
-#if 0
+#if 0
		leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
		bzero(leaf, PAGE_SIZE);
-#endif
+#endif
		leaf = (struct ia64_lpte *)pmap_page_to_va(pg);
 
		dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;
@@ -1668,7 +1668,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
	/* vm_memattr_t ma; */
 
	/* XXX this needs work */
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(pm=%p, va=%#lx, pa=%p, prot=%#x)",
	    pmap, va, pa, prot);
@@ -1677,7 +1677,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	canfail = (flags & PMAP_CANFAIL) != 0;
 
	/* ma = pmap_flags_to_memattr(flags); */
-
+
 	rw_enter(&pvh_global_lock, RW_WRITER);
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1717,7 +1717,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
	/* XXX hmm
	pa = VM_PAGE_TO_PHYS(m); */
-
+
 	m = PHYS_TO_VM_PAGE(pa);
 	if (m == NULL) {
		/* implies page not managed? */
@@ -1770,10 +1770,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
	 * Enter on the PV list if part of our managed memory.
	 */
 	if (vm_page_is_managed(m)) {
-#if 0
+#if 0
		KASSERTMSG(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
-#endif
+#endif
		pmap_insert_entry(pmap, va, m);
		managed = true;
 	}
@@ -1897,7 +1897,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
		panic("pmap_protect: unaligned addresses");
 	sva = trunc_page(sva);
 	eva = round_page(eva) - 1;
-
+
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
 	for ( ; sva < eva; sva += PAGE_SIZE) {
@@ -1913,7 +1913,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
		/* wired pages unaffected by prot changes */
		if (pmap_wired(pte))
			continue;
-
+
		if ((prot & VM_PROT_WRITE) == 0 &&
		    pmap_managed(pte) && pmap_dirty(pte)) {
			paddr_t pa = pmap_ppn(pte);
@@ -1960,13 +1960,13 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
	/* XXX panic if no pte or not wired? */
 	if (pte == NULL)
		panic("pmap_unwire: %lx not found in vhpt", va);
-
+
 	if (!pmap_wired(pte))
		panic("pmap_unwire: pte %p isn't wired", pte);
-
+
 	pmap->pm_stats.wired_count--;
 	pmap_clear_wired(pte);
-
+
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
 }
@@ -2001,7 +2001,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
		*pap = pa;
 
 	UVMHIST_LOG(maphist, "(pa=%#lx)", pa, 0, 0, 0);
-
+
 	return (pa != 0);
 }
 /*
@@ -2018,7 +2018,7 @@ pmap_kextract(vaddr_t va)
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(va=%#lx)", va, 0, 0, 0);
-
+
 	KASSERTMSG(va >= VM_MAXUSER_ADDRESS, "Must be kernel VA");
 
	/* Regions 6 and 7 are direct mapped. */
@@ -2084,12 +2084,12 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	vm_memattr_t attr;
 	const bool managed = false;	/* don't gather ref/mod info */
 	const bool wired = true;	/* pmap_kenter_pa always wired */
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(va=%#lx, pa=%#lx, prot=%p, flags=%p)",
	    va, pa, prot, flags);
 
 	KASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
-
+
 	attr = pmap_flags_to_memattr(flags);
 	pte = pmap_find_kpte(va);
@@ -2116,7 +2116,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
 	struct ia64_lpte *pte;
 	vaddr_t eva = va + size;
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(va=%#lx)", va, 0, 0, 0);
@@ -2179,7 +2179,7 @@ pmap_activate(struct lwp *l)
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(lwp=%p)", l, 0, 0, 0);
 
 	KASSERT(l == curlwp);
-#if 0
+#if 0
	/* pmap_switch(vmspace_pmap(td->td_proc->p_vmspace)); */
	pmap_switch(vm_map_pmap(&l->l_proc->p_vmspace->vm_map));
 #else
@@ -2226,7 +2226,7 @@ pmap_zero_page(paddr_t phys)
 	struct vm_page *m;
 	vaddr_t va;
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(pa=%p)", phys, 0, 0, 0);
@@ -2261,11 +2261,11 @@ pmap_copy_page(paddr_t psrc, paddr_t pds
 	md = PHYS_TO_VM_PAGE(pdst);
 	ms = PHYS_TO_VM_PAGE(psrc);
 	KASSERT(md != NULL && ms != NULL);
-
+
 	dst_va = pmap_page_to_va(md);
 	src_va = pmap_page_to_va(ms);
 	KASSERT(trunc_page(dst_va) == dst_va && trunc_page(src_va) == src_va);
-
+
 	memcpy((void *)dst_va, (void *)src_va, PAGE_SIZE);
 }
@@ -2286,7 +2286,7 @@ pmap_page_protect(struct vm_page *pg, vm
 	pmap_t pmap;
 	pv_entry_t pv;
 	vaddr_t va;
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(m=%p, prot=%p)", pg, prot, 0, 0);
@@ -2310,7 +2310,7 @@ pmap_page_protect(struct vm_page *pg, vm
		vm_page_aflag_clear(pg, PGA_WRITEABLE); */
		pg->flags |= PG_RDONLY;
-
+
		rw_exit(&pvh_global_lock);
 	} else {
		pmap_remove_all_phys(pg);
@@ -2337,7 +2337,7 @@ pmap_clear_modify(struct vm_page *pg)
	    __func__, pg);
 
 	rv = false;
-
+
	//VM_OBJECT_ASSERT_WLOCKED(m->object);
	//KASSERT(!vm_page_xbusied(m),
	//    ("pmap_clear_modify: page %p is exclusive busied", m));
@@ -2353,7 +2353,7 @@ pmap_clear_modify(struct vm_page *pg)
		return;
 	if (pg->flags & PG_RDONLY)
		return (rv);
-#endif
+#endif
 	rw_enter(&pvh_global_lock, RW_WRITER);
 	TAILQ_FOREACH(pv, &pg->mdpage.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
@@ -2399,7 +2399,7 @@ pmap_clear_reference(struct vm_page *pg)
	    __func__, pg);
 
 	rv = false;
-
+
 	rw_enter(&pvh_global_lock, RW_WRITER);
 	TAILQ_FOREACH(pv, &pg->mdpage.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
@@ -2417,7 +2417,7 @@ pmap_clear_reference(struct vm_page *pg)
 	}
 
 	rw_exit(&pvh_global_lock);
-	return (rv);
+	return (rv);
 }
@@ -2488,7 +2488,7 @@ pmap_is_referenced(struct vm_page *pg)
 	KASSERTMSG(vm_page_is_managed(pg), "%s: page %p is not managed",
	    __func__, pg);
-
+
 	rv = false;
 	rw_enter(&pvh_global_lock, RW_WRITER);
 	TAILQ_FOREACH(pv, &pg->mdpage.pv_list, pv_list) {
@@ -2496,7 +2496,7 @@ pmap_is_referenced(struct vm_page *pg)
		PMAP_LOCK(pmap);
		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(pv->pv_va);
-		KASSERTMSG(pte != NULL, "pte");
+		KASSERTMSG(pte != NULL, "pte");
		rv = pmap_accessed(pte) ? true : false;
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
@@ -2505,7 +2505,7 @@ pmap_is_referenced(struct vm_page *pg)
 	}
 
 	rw_exit(&pvh_global_lock);
-	return (rv);
+	return (rv);
 }
@@ -2571,7 +2571,7 @@ pmap_procwr(struct proc *p, vaddr_t va, 
 	vsize_t len;
 
 	struct pmap * const pm = p->p_vmspace->vm_map.pmap;
-
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(pm=%p, va=%#lx, sz=%#lx)", pm, va, sz, 0);
@@ -2635,7 +2635,7 @@ pmap_remove_all_phys(struct vm_page *m)
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
 	}
-	/* XXX freebsd
+	/* XXX freebsd
	   vm_page_aflag_clear(m, PGA_WRITEABLE); */
 	m->flags |= PG_RDONLY;
@@ -2652,7 +2652,7 @@ static struct vm_page *
 vm_page_alloc1(void)
 {
 	struct vm_page *pg;
-
+
 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
 	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
@@ -2689,13 +2689,13 @@ pmap_flags_to_memattr(u_int flags)
 {
 	u_int cacheflags = flags & PMAP_CACHE_MASK;
 
-#if 0
+#if 0
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "(PMAP_NOCACHE=%u, PMAP_WRITE_COMBINE=%u, "
	    "PMAP_WRITE_BACK=%u, PMAP_NOCACHE_OVR=%u)",
	    (flags & PMAP_NOCACHE) != 0, (flags & PMAP_WRITE_COMBINE) != 0,
	    (flags & PMAP_WRITE_BACK) != 0, (flags & PMAP_NOCACHE_OVR) != 0);
-#endif
+#endif
 	switch (cacheflags) {
 	case PMAP_NOCACHE:
		return VM_MEMATTR_UNCACHEABLE;
@@ -2726,7 +2726,7 @@ pmap_testout(void)
 	struct vm_page *pg;
 	int ref, mod;
 	bool extracted;
-
+
	/* Allocate a page */
 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
 	KASSERT(va != 0);

Index: src/sys/arch/ia64/ia64/vm_machdep.c
diff -u src/sys/arch/ia64/ia64/vm_machdep.c:1.18 src/sys/arch/ia64/ia64/vm_machdep.c:1.19
--- src/sys/arch/ia64/ia64/vm_machdep.c:1.18	Thu Feb 23 14:55:47 2023
+++ src/sys/arch/ia64/ia64/vm_machdep.c	Fri Oct 6 11:45:16 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: vm_machdep.c,v 1.18 2023/02/23 14:55:47 riastradh Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.19 2023/10/06 11:45:16 skrll Exp $ */
 
 /*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 *
- * Author:
+ * Author:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -103,9 +103,9 @@ cpu_switchto(lwp_t *oldlwp, lwp_t *newlw
	/* required for lwp_startup, copy oldlwp into r9, "mov r9=in0" */
 	__asm __volatile("mov %0=%1" : "=r"(reg9) : "r"(oldlwp));
-
+
	/* XXX handle RAS eventually */
-
+
 	if (oldlwp == NULL) {
		restorectx(newpcb);
 	} else {
@@ -120,7 +120,7 @@ cpu_switchto(lwp_t *oldlwp, lwp_t *newlw
 /*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
- *
+ *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
@@ -177,7 +177,7 @@ cpu_lwp_fork(struct lwp *l1, struct lwp 
 	l2->l_md.md_astpending = 0;
 	l2->l_md.user_stack = NULL;
 	l2->l_md.user_stack_size = 0;
-
+
	/*
	 * Copy the trapframe.
	 */