Module Name: src Committed By: cherry Date: Sun Jan 6 14:35:31 UTC 2019
Modified Files: src/sys/arch/amd64/amd64: cpufunc.S src/sys/arch/i386/i386: cpufunc.S i386func.S src/sys/arch/xen/x86: xenfunc.c Log Message: Rollback http://mail-index.netbsd.org/source-changes/2018/12/22/msg101629.html This change breaks module loading due to weak aliases being unsupported in the kernel module linker. Requested by maxv@ and others as it affects their work. No immediate decision on a replacement method is available, but other options suggested include pre-processing, conditional compilation (#ifdef etc) and other source-level methods to avoid link-time decision making. To generate a diff of this commit: cvs rdiff -u -r1.34 -r1.35 src/sys/arch/amd64/amd64/cpufunc.S cvs rdiff -u -r1.26 -r1.27 src/sys/arch/i386/i386/cpufunc.S cvs rdiff -u -r1.19 -r1.20 src/sys/arch/i386/i386/i386func.S cvs rdiff -u -r1.23 -r1.24 src/sys/arch/xen/x86/xenfunc.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/amd64/amd64/cpufunc.S diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.34 src/sys/arch/amd64/amd64/cpufunc.S:1.35 --- src/sys/arch/amd64/amd64/cpufunc.S:1.34 Sat Dec 22 21:27:22 2018 +++ src/sys/arch/amd64/amd64/cpufunc.S Sun Jan 6 14:35:31 2019 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */ +/* $NetBSD: cpufunc.S,v 1.35 2019/01/06 14:35:31 cherry Exp $ */ /* * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. @@ -63,53 +63,18 @@ ENTRY(x86_mfence) ret END(x86_mfence) -/* - * These functions below should always be accessed via the corresponding wrapper - * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() - * - * We use this rather roundabout method so that a runtime wrapper function may - * be made available for PVHVM, which could override both native and PV aliases - * and decide which to invoke at run time. - */ - -WEAK_ALIAS(invlpg, amd64_invlpg) -WEAK_ALIAS(lidt, amd64_lidt) -WEAK_ALIAS(lldt, amd64_lldt) -WEAK_ALIAS(ltr, amd64_ltr) -WEAK_ALIAS(lcr0, amd64_lcr0) -WEAK_ALIAS(rcr0, amd64_rcr0) -WEAK_ALIAS(rcr2, amd64_rcr2) -WEAK_ALIAS(lcr2, amd64_lcr2) -WEAK_ALIAS(rcr3, amd64_rcr3) -WEAK_ALIAS(lcr3, amd64_lcr3) -WEAK_ALIAS(tlbflush, amd64_tlbflush) -WEAK_ALIAS(tlbflushg, amd64_tlbflushg) -WEAK_ALIAS(rdr0, amd64_rdr0) -WEAK_ALIAS(ldr0, amd64_ldr0) -WEAK_ALIAS(rdr1, amd64_rdr1) -WEAK_ALIAS(ldr1, amd64_ldr1) -WEAK_ALIAS(rdr2, amd64_rdr2) -WEAK_ALIAS(ldr2, amd64_ldr2) -WEAK_ALIAS(rdr3, amd64_rdr3) -WEAK_ALIAS(ldr3, amd64_ldr3) -WEAK_ALIAS(rdr6, amd64_rdr6) -WEAK_ALIAS(ldr6, amd64_ldr6) -WEAK_ALIAS(rdr7, amd64_rdr7) -WEAK_ALIAS(ldr7, amd64_ldr7) -WEAK_ALIAS(wbinvd, amd64_wbinvd) - #ifndef XEN -ENTRY(amd64_invlpg) +ENTRY(invlpg) invlpg (%rdi) ret -END(amd64_invlpg) +END(invlpg) -ENTRY(amd64_lidt) +ENTRY(lidt) lidt (%rdi) ret -END(amd64_lidt) +END(lidt) -ENTRY(amd64_lldt) +ENTRY(lldt) cmpl %edi, CPUVAR(CURLDT) jne 1f ret @@ -117,42 +82,42 @@ ENTRY(amd64_lldt) movl %edi, 
CPUVAR(CURLDT) lldt %di ret -END(amd64_lldt) +END(lldt) -ENTRY(amd64_ltr) +ENTRY(ltr) ltr %di ret -END(amd64_ltr) +END(ltr) -ENTRY(amd64_lcr0) +ENTRY(lcr0) movq %rdi, %cr0 ret -END(amd64_lcr0) +END(lcr0) -ENTRY(amd64_rcr0) +ENTRY(rcr0) movq %cr0, %rax ret -END(amd64_rcr0) +END(rcr0) -ENTRY(amd64_lcr2) +ENTRY(lcr2) movq %rdi, %cr2 ret -END(amd64_lcr2) +END(lcr2) -ENTRY(amd64_rcr2) +ENTRY(rcr2) movq %cr2, %rax ret -END(amd64_rcr2) +END(rcr2) -ENTRY(amd64_lcr3) +ENTRY(lcr3) movq %rdi, %cr3 ret -END(amd64_lcr3) +END(lcr3) -ENTRY(amd64_rcr3) +ENTRY(rcr3) movq %cr3, %rax ret -END(amd64_rcr3) +END(rcr3) #endif ENTRY(lcr4) @@ -194,7 +159,7 @@ END(rcr8) * If PGE is not in use, we reload CR3. */ #ifndef XEN -ENTRY(amd64_tlbflushg) +ENTRY(tlbflushg) movq %cr4, %rax testq $CR4_PGE, %rax jz 1f @@ -203,74 +168,74 @@ ENTRY(amd64_tlbflushg) movq %rdx, %cr4 movq %rax, %cr4 ret -END(amd64_tlbflushg) +END(tlbflushg) -ENTRY(amd64_tlbflush) +ENTRY(tlbflush) 1: movq %cr3, %rax movq %rax, %cr3 ret -END(amd64_tlbflush) +END(tlbflush) -ENTRY(amd64_ldr0) +ENTRY(ldr0) movq %rdi, %dr0 ret -END(amd64_ldr0) +END(ldr0) -ENTRY(amd64_rdr0) +ENTRY(rdr0) movq %dr0, %rax ret -END(amd64_rdr0) +END(rdr0) -ENTRY(amd64_ldr1) +ENTRY(ldr1) movq %rdi, %dr1 ret -END(amd64_ldr1) +END(ldr1) -ENTRY(amd64_rdr1) +ENTRY(rdr1) movq %dr1, %rax ret -END(amd64_rdr1) +END(rdr1) -ENTRY(amd64_ldr2) +ENTRY(ldr2) movq %rdi, %dr2 ret -END(amd64_ldr2) +END(ldr2) -ENTRY(amd64_rdr2) +ENTRY(rdr2) movq %dr2, %rax ret -END(amd64_rdr2) +END(rdr2) -ENTRY(amd64_ldr3) +ENTRY(ldr3) movq %rdi, %dr3 ret -END(amd64_ldr3) +END(ldr3) -ENTRY(amd64_rdr3) +ENTRY(rdr3) movq %dr3, %rax ret -END(amd64_rdr3) +END(rdr3) -ENTRY(amd64_ldr6) +ENTRY(ldr6) movq %rdi, %dr6 ret -END(amd64_ldr6) +END(ldr6) -ENTRY(amd64_rdr6) +ENTRY(rdr6) movq %dr6, %rax ret -END(amd64_rdr6) +END(rdr6) -ENTRY(amd64_ldr7) +ENTRY(ldr7) movq %rdi, %dr7 ret -END(amd64_ldr7) +END(ldr7) -ENTRY(amd64_rdr7) +ENTRY(rdr7) movq %dr7, %rax ret -END(amd64_rdr7) +END(rdr7) 
ENTRY(x86_disable_intr) cli Index: src/sys/arch/i386/i386/cpufunc.S diff -u src/sys/arch/i386/i386/cpufunc.S:1.26 src/sys/arch/i386/i386/cpufunc.S:1.27 --- src/sys/arch/i386/i386/cpufunc.S:1.26 Sat Dec 22 21:27:22 2018 +++ src/sys/arch/i386/i386/cpufunc.S Sun Jan 6 14:35:31 2019 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */ +/* $NetBSD: cpufunc.S,v 1.27 2019/01/06 14:35:31 cherry Exp $ */ /*- * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. @@ -38,7 +38,7 @@ #include <sys/errno.h> #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.27 2019/01/06 14:35:31 cherry Exp $"); #include "opt_xen.h" @@ -47,18 +47,6 @@ __KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v #include "assym.h" -/* - * These functions below should always be accessed via the corresponding wrapper - * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() - * - * We use this rather roundabout method so that a runtime wrapper function may - * be made available for PVHVM, which could override both native and PV aliases - * and decide which to invoke at run time. 
- */ - -WEAK_ALIAS(lidt, i386_lidt) -WEAK_ALIAS(rcr3, i386_rcr3) - ENTRY(x86_lfence) lock addl $0, -4(%esp) @@ -78,17 +66,17 @@ ENTRY(x86_mfence) END(x86_mfence) #ifndef XEN -ENTRY(i386_lidt) +ENTRY(lidt) movl 4(%esp), %eax lidt (%eax) ret -END(i386_lidt) +END(lidt) #endif /* XEN */ -ENTRY(i386_rcr3) +ENTRY(rcr3) movl %cr3, %eax ret -END(i386_rcr3) +END(rcr3) ENTRY(lcr4) movl 4(%esp), %eax Index: src/sys/arch/i386/i386/i386func.S diff -u src/sys/arch/i386/i386/i386func.S:1.19 src/sys/arch/i386/i386/i386func.S:1.20 --- src/sys/arch/i386/i386/i386func.S:1.19 Sat Dec 22 21:27:22 2018 +++ src/sys/arch/i386/i386/i386func.S Sun Jan 6 14:35:31 2019 @@ -1,4 +1,4 @@ -/* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */ +/* $NetBSD: i386func.S,v 1.20 2019/01/06 14:35:31 cherry Exp $ */ /*- * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. @@ -36,53 +36,20 @@ */ #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.20 2019/01/06 14:35:31 cherry Exp $"); #include <machine/specialreg.h> #include <machine/segments.h> #include "assym.h" -/* - * These functions below should always be accessed via the corresponding wrapper - * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() - * - * We use this rather roundabout method so that a runtime wrapper function may - * be made available for PVHVM, which could override both native and PV aliases - * and decide which to invoke at run time. 
- */ - -WEAK_ALIAS(invlpg, i386_invlpg) -WEAK_ALIAS(lldt, i386_lldt) -WEAK_ALIAS(ltr, i386_ltr) -WEAK_ALIAS(lcr0, i386_lcr0) -WEAK_ALIAS(rcr0, i386_rcr0) -WEAK_ALIAS(lcr3, i386_lcr3) -WEAK_ALIAS(tlbflush, i386_tlbflush) -WEAK_ALIAS(tlbflushg, i386_tlbflushg) -WEAK_ALIAS(rdr0, i386_rdr0) -WEAK_ALIAS(ldr0, i386_ldr0) -WEAK_ALIAS(rdr1, i386_rdr1) -WEAK_ALIAS(ldr1, i386_ldr1) -WEAK_ALIAS(rdr2, i386_rdr2) -WEAK_ALIAS(ldr2, i386_ldr2) -WEAK_ALIAS(rdr3, i386_rdr3) -WEAK_ALIAS(ldr3, i386_ldr3) -WEAK_ALIAS(rdr6, i386_rdr6) -WEAK_ALIAS(ldr6, i386_ldr6) -WEAK_ALIAS(rdr7, i386_rdr7) -WEAK_ALIAS(ldr7, i386_ldr7) -WEAK_ALIAS(rcr2, i386_rcr2) -WEAK_ALIAS(lcr2, i386_lcr2) -WEAK_ALIAS(wbinvd, i386_wbinvd) - -ENTRY(i386_invlpg) +ENTRY(invlpg) movl 4(%esp), %eax invlpg (%eax) ret -END(i386_invlpg) +END(invlpg) -ENTRY(i386_lldt) +ENTRY(lldt) movl 4(%esp), %eax cmpl %eax, CPUVAR(CURLDT) jne 1f @@ -91,30 +58,30 @@ ENTRY(i386_lldt) movl %eax, CPUVAR(CURLDT) lldt %ax ret -END(i386_lldt) +END(lldt) -ENTRY(i386_ltr) +ENTRY(ltr) movl 4(%esp), %eax ltr %ax ret -END(i386_ltr) +END(ltr) -ENTRY(i386_lcr0) +ENTRY(lcr0) movl 4(%esp), %eax movl %eax, %cr0 ret -END(i386_lcr0) +END(lcr0) -ENTRY(i386_rcr0) +ENTRY(rcr0) movl %cr0, %eax ret -END(i386_rcr0) +END(rcr0) -ENTRY(i386_lcr3) +ENTRY(lcr3) movl 4(%esp), %eax movl %eax, %cr3 ret -END(i386_lcr3) +END(lcr3) /* * Big hammer: flush all TLB entries, including ones from PTE's @@ -136,7 +103,7 @@ END(i386_lcr3) * first since i486 does not have CR4. Note: the feature flag may * be present while the actual PGE functionality not yet enabled. 
*/ -ENTRY(i386_tlbflushg) +ENTRY(tlbflushg) testl $CPUID_PGE, _C_LABEL(cpu_feature) jz 1f movl %cr4, %eax @@ -147,96 +114,96 @@ ENTRY(i386_tlbflushg) movl %edx, %cr4 movl %eax, %cr4 ret -END(i386_tlbflushg) +END(tlbflushg) -ENTRY(i386_tlbflush) +ENTRY(tlbflush) 1: movl %cr3, %eax movl %eax, %cr3 ret -END(i386_tlbflush) +END(tlbflush) -ENTRY(i386_ldr0) +ENTRY(ldr0) movl 4(%esp), %eax movl %eax, %dr0 ret -END(i386_ldr0) +END(ldr0) -ENTRY(i386_rdr0) +ENTRY(rdr0) movl %dr0, %eax ret -END(i386_rdr0) +END(rdr0) -ENTRY(i386_ldr1) +ENTRY(ldr1) movl 4(%esp), %eax movl %eax, %dr1 ret -END(i386_ldr1) +END(ldr1) -ENTRY(i386_rdr1) +ENTRY(rdr1) movl %dr1, %eax ret -END(i386_rdr1) +END(rdr1) -ENTRY(i386_ldr2) +ENTRY(ldr2) movl 4(%esp), %eax movl %eax, %dr2 ret -END(i386_ldr2) +END(ldr2) -ENTRY(i386_rdr2) +ENTRY(rdr2) movl %dr2, %eax ret -END(i386_rdr2) +END(rdr2) -ENTRY(i386_ldr3) +ENTRY(ldr3) movl 4(%esp), %eax movl %eax, %dr3 ret -END(i386_ldr3) +END(ldr3) -ENTRY(i386_rdr3) +ENTRY(rdr3) movl %dr3, %eax ret -END(i386_rdr3) +END(rdr3) -ENTRY(i386_ldr6) +ENTRY(ldr6) movl 4(%esp), %eax movl %eax, %dr6 ret -END(i386_ldr6) +END(ldr6) -ENTRY(i386_rdr6) +ENTRY(rdr6) movl %dr6, %eax ret -END(i386_rdr6) +END(rdr6) -ENTRY(i386_ldr7) +ENTRY(ldr7) movl 4(%esp), %eax movl %eax, %dr7 ret -END(i386_ldr7) +END(ldr7) -ENTRY(i386_rdr7) +ENTRY(rdr7) movl %dr7, %eax ret -END(i386_rdr7) +END(rdr7) -ENTRY(i386_rcr2) +ENTRY(rcr2) movl %cr2, %eax ret -END(i386_rcr2) +END(rcr2) -ENTRY(i386_lcr2) +ENTRY(lcr2) movl 4(%esp), %eax movl %eax, %cr2 ret -END(i386_lcr2) +END(lcr2) -ENTRY(i386_wbinvd) +ENTRY(wbinvd) wbinvd ret -END(i386_wbinvd) +END(wbinvd) ENTRY(x86_disable_intr) cli Index: src/sys/arch/xen/x86/xenfunc.c diff -u src/sys/arch/xen/x86/xenfunc.c:1.23 src/sys/arch/xen/x86/xenfunc.c:1.24 --- src/sys/arch/xen/x86/xenfunc.c:1.23 Sat Dec 22 21:27:22 2018 +++ src/sys/arch/xen/x86/xenfunc.c Sun Jan 6 14:35:31 2019 @@ -1,4 +1,4 @@ -/* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */ +/* 
$NetBSD: xenfunc.c,v 1.24 2019/01/06 14:35:31 cherry Exp $ */ /* * Copyright (c) 2004 Christian Limpach. @@ -26,7 +26,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.24 2019/01/06 14:35:31 cherry Exp $"); #include <sys/param.h> @@ -45,74 +45,8 @@ __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v void xen_set_ldt(vaddr_t, uint32_t); -/* - * We don't need to export these declarations, since they are used via - * linker aliasing. They should always be accessed via the - * corresponding wrapper function names defined in - * x86/include/cpufunc.h and exported as __weak_alias() - * - * We use this rather roundabout method so that a runtime wrapper - * function may be made available for PVHVM, which could override both - * native and PV aliases and decide which to invoke at run time. - */ - -void xen_invlpg(vaddr_t); -void xen_lidt(struct region_descriptor *); -void xen_lldt(u_short); -void xen_ltr(u_short); -void xen_lcr0(u_long); -u_long xen_rcr0(void); -void xen_tlbflush(void); -void xen_tlbflushg(void); -register_t xen_rdr0(void); -void xen_ldr0(register_t); -register_t xen_rdr1(void); -void xen_ldr1(register_t); -register_t xen_rdr2(void); -void xen_ldr2(register_t); -register_t xen_rdr3(void); -void xen_ldr3(register_t); -register_t xen_rdr6(void); -void xen_ldr6(register_t); -register_t xen_rdr7(void); -void xen_ldr7(register_t); -void xen_wbinvd(void); -vaddr_t xen_rcr2(void); - -__weak_alias(invlpg, xen_invlpg); -__weak_alias(lidt, xen_lidt); -__weak_alias(lldt, xen_lldt); -__weak_alias(ltr, xen_ltr); -__weak_alias(lcr0, xen_lcr0); -__weak_alias(rcr0, xen_rcr0); -__weak_alias(tlbflush, xen_tlbflush); -__weak_alias(tlbflushg, xen_tlbflushg); -__weak_alias(rdr0, xen_rdr0); -__weak_alias(ldr0, xen_ldr0); -__weak_alias(rdr1, xen_rdr1); -__weak_alias(ldr1, xen_ldr1); -__weak_alias(rdr2, xen_rdr2); -__weak_alias(ldr2, xen_ldr2); -__weak_alias(rdr3, xen_rdr3); 
-__weak_alias(ldr3, xen_ldr3); -__weak_alias(rdr6, xen_rdr6); -__weak_alias(ldr6, xen_ldr6); -__weak_alias(rdr7, xen_rdr7); -__weak_alias(ldr7, xen_ldr7); -__weak_alias(wbinvd, xen_wbinvd); -__weak_alias(rcr2, xen_rcr2); - -#ifdef __x86_64__ -void xen_setusergs(int); -__weak_alias(setusergs, xen_setusergs); -#else -void xen_lcr3(vaddr_t); -__weak_alias(lcr3, xen_lcr3); - -#endif - void -xen_invlpg(vaddr_t addr) +invlpg(vaddr_t addr) { int s = splvm(); /* XXXSMP */ xpq_queue_invlpg(addr); @@ -120,7 +54,7 @@ xen_invlpg(vaddr_t addr) } void -xen_lidt(struct region_descriptor *rd) +lidt(struct region_descriptor *rd) { /* * We need to do this because we can't assume kmem_alloc(9) @@ -165,7 +99,7 @@ xen_lidt(struct region_descriptor *rd) } void -xen_lldt(u_short sel) +lldt(u_short sel) { #ifndef __x86_64__ struct cpu_info *ci; @@ -184,19 +118,19 @@ xen_lldt(u_short sel) } void -xen_ltr(u_short sel) +ltr(u_short sel) { panic("XXX ltr not supported\n"); } void -xen_lcr0(u_long val) +lcr0(u_long val) { panic("XXX lcr0 not supported\n"); } u_long -xen_rcr0(void) +rcr0(void) { /* XXX: handle X86_CR0_TS ? 
*/ return 0; @@ -204,7 +138,7 @@ xen_rcr0(void) #ifndef __x86_64__ void -xen_lcr3(vaddr_t val) +lcr3(vaddr_t val) { int s = splvm(); /* XXXSMP */ xpq_queue_pt_switch(xpmap_ptom_masked(val)); @@ -213,7 +147,7 @@ xen_lcr3(vaddr_t val) #endif void -xen_tlbflush(void) +tlbflush(void) { int s = splvm(); /* XXXSMP */ xpq_queue_tlb_flush(); @@ -221,110 +155,110 @@ xen_tlbflush(void) } void -xen_tlbflushg(void) +tlbflushg(void) { tlbflush(); } register_t -xen_rdr0(void) +rdr0(void) { return HYPERVISOR_get_debugreg(0); } void -xen_ldr0(register_t val) +ldr0(register_t val) { HYPERVISOR_set_debugreg(0, val); } register_t -xen_rdr1(void) +rdr1(void) { return HYPERVISOR_get_debugreg(1); } void -xen_ldr1(register_t val) +ldr1(register_t val) { HYPERVISOR_set_debugreg(1, val); } register_t -xen_rdr2(void) +rdr2(void) { return HYPERVISOR_get_debugreg(2); } void -xen_ldr2(register_t val) +ldr2(register_t val) { HYPERVISOR_set_debugreg(2, val); } register_t -xen_rdr3(void) +rdr3(void) { return HYPERVISOR_get_debugreg(3); } void -xen_ldr3(register_t val) +ldr3(register_t val) { HYPERVISOR_set_debugreg(3, val); } register_t -xen_rdr6(void) +rdr6(void) { return HYPERVISOR_get_debugreg(6); } void -xen_ldr6(register_t val) +ldr6(register_t val) { HYPERVISOR_set_debugreg(6, val); } register_t -xen_rdr7(void) +rdr7(void) { return HYPERVISOR_get_debugreg(7); } void -xen_ldr7(register_t val) +ldr7(register_t val) { HYPERVISOR_set_debugreg(7, val); } void -xen_wbinvd(void) +wbinvd(void) { xpq_flush_cache(); } vaddr_t -xen_rcr2(void) +rcr2(void) { return curcpu()->ci_vcpu->arch.cr2; } #ifdef __x86_64__ void -xen_setusergs(int gssel) +setusergs(int gssel) { HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); }