---
 i386/i386/msr.h  |  3 ++
 x86_64/boothdr.S | 86 +++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 85 insertions(+), 4 deletions(-)
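
For context on the boot_entry64 changes below: wrmsr takes the MSR index in
%ecx and a 64-bit value split across %edx:%eax, which is why the address of
percpu_array is loaded into %rdx and split before each wrmsr.  Once
MSR_REG_GSBASE holds that address, per-CPU data can be reached with
%gs-relative operands.  A minimal sketch of the access pattern, using a
placeholder offset name rather than gnumach's real percpu layout:

	/* Sketch only: read a per-CPU field once the GS base points at the
	 * per-CPU area.  PERCPU_SELF_OFFSET is a placeholder, not a symbol
	 * defined by gnumach. */
	#define PERCPU_SELF_OFFSET 0
	movl	%gs:PERCPU_SELF_OFFSET, %eax	/* field of the current CPU */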

diff --git a/i386/i386/msr.h b/i386/i386/msr.h
index 8f09b80b..42b01062 100644
--- a/i386/i386/msr.h
+++ b/i386/i386/msr.h
@@ -26,6 +26,9 @@
 #define MSR_REG_FMASK 0xC0000084
 #define MSR_REG_FSBASE 0xC0000100
 #define MSR_REG_GSBASE 0xC0000101
+#ifdef __x86_64__
+#define MSR_REG_KGSBASE 0xC0000102
+#endif
 
 #define MSR_EFER_SCE  0x00000001
 
diff --git a/x86_64/boothdr.S b/x86_64/boothdr.S
index 43c6d54c..42423b31 100644
--- a/x86_64/boothdr.S
+++ b/x86_64/boothdr.S
@@ -20,7 +20,10 @@
 
 #include <i386/i386asm.h>
 #include <i386/i386/proc_reg.h>
+#include <i386/i386/gdt.h>
+#include <i386/i386/msr.h>
 #include <i386/i386/seg.h>
+#include <i386/apic.h>
 
 #define BOOT_CS        0x8
 #define BOOT_DS        0x10
@@ -52,6 +55,17 @@ boot_hdr:
        .global _start
 _start:
 boot_entry:
+       /* Enable local apic in xAPIC mode */
+       xorl    %eax, %eax
+       xorl    %edx, %edx
+       movl    $APIC_MSR, %ecx
+       rdmsr
+       orl     $APIC_MSR_ENABLE, %eax
+       orl     $APIC_MSR_BSP, %eax
+       andl    $(~APIC_MSR_X2APIC), %eax
+       movl    $APIC_MSR, %ecx
+       wrmsr
+
        /*
         * Prepare minimal page mapping to jump to 64 bit and to C code.
         * The first 4GB is identity mapped, and the first 2GB are re-mapped
@@ -137,7 +151,7 @@ switch64:
        mov     %cr4,%eax
        or      $CR4_PAE,%eax
        mov     %eax,%cr4
-       mov     $0xC0000080,%ecx  // select EFER register
+       mov     $MSR_REG_EFER,%ecx
        rdmsr
        or      $(1 << 8),%eax  // long mode enable bit
        wrmsr
@@ -166,6 +180,39 @@ boot_entry64:
        andq    $(~15),%rax
        movq    %rax,%rsp
 
+       /* Set GS base address */
+       movq    $percpu_array, %rdx
+       movl    %edx, %eax
+       shrq    $32, %rdx
+       movl    $MSR_REG_GSBASE, %ecx
+       wrmsr
+
+       /* Reload gdt in long mode takes 2 args */
+       movw    gdt64pointer, %di
+       movq    gdt64pointer+2, %rsi
+       lgdt    gdt64pointer
+
+       movw    $PERCPU_DS,%ax
+       movw    %ax,%gs
+
+       /* instead of ljmp */
+       movq    $fixup64, %rcx
+       pushq   $8
+       pushq   %rcx
+       /* far return to below - performs actual reload of gdt */
+       retfq
+
+fixup64:
+       /* Set KernelGS base address */
+       movq    $percpu_array, %rdx
+       movl    %edx, %eax
+       shrq    $32, %rdx
+       movl    $MSR_REG_KGSBASE, %ecx
+       wrmsr
+
+       /* Reset stack */
+       andq    $(~0xf),%rsp
+
        /* Reset EFLAGS to a known state.  */
        pushq   $0
        popf
@@ -200,16 +247,47 @@ iplt_done:
        .section .boot.data
        .align 4096
 #define  SEG_ACCESS_OFS 40
-#define  SEG_GRANULARITY_OFS 52
+#define  SEG_FLAGS_OFS 52
 gdt64:
        /* NULL segment = 0x0 */
        .quad   0
 
        /* BOOT_CS = 0x8 */
-        .quad  (ACC_P << SEG_ACCESS_OFS) | (ACC_CODE_R << SEG_ACCESS_OFS) | (SZ_64 << SEG_GRANULARITY_OFS)
+        .quad  ((ACC_A | ACC_P | ACC_CODE_R) << SEG_ACCESS_OFS) | (SZ_64 << SEG_FLAGS_OFS)
 
        /* BOOT_DS = 0x10 */
-       .quad   (ACC_P << SEG_ACCESS_OFS) | (ACC_DATA_W << SEG_ACCESS_OFS) | (SZ_64 << SEG_GRANULARITY_OFS)
+        .quad  ((ACC_A | ACC_P | ACC_DATA_W) << SEG_ACCESS_OFS) | (SZ_64 << SEG_FLAGS_OFS)
+
+       /* LDT = 0x18 */
+       .quad   0
+
+       /* TSS = 0x20 */
+       .quad   0
+
+       /* USER_LDT = 0x28 */
+       .quad   0
+
+       /* USER_TSS = 0x30 */
+       .quad   0
+
+       /* LINEAR = 0x38 */
+       .quad   0
+
+       /* FPREGS = 0x40 */
+       .quad   0
+
+       /* USER_GDT = 0x48 and 0x50 */
+       .quad   0
+       .quad   0
+
+       /* USER_TSS64 = 0x58 */
+       .quad   0
+
+       /* Second half of USER_TSS64 descriptor = 0x60 */
+       .quad   0
+
+       /* boot GS = 0x68 */
+        .quad  ((ACC_A | ACC_P | ACC_DATA_W) << SEG_ACCESS_OFS) | (SZ_64 << SEG_FLAGS_OFS)
 gdt64end:
        .skip   (4096 - (gdt64end - gdt64))
 gdt64pointer:
-- 
2.45.2
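
A note on the new MSR_REG_KGSBASE constant: MSR 0xC0000102 is
IA32_KERNEL_GS_BASE, the value that the swapgs instruction exchanges with the
live GS base on kernel entry and exit.  Initializing it to the same
percpu_array address as MSR_REG_GSBASE keeps %gs resolving to the per-CPU
area no matter how many times swapgs has run.  A minimal sketch of the usual
entry-path pattern (illustrative only, not gnumach code):

	example_trap_entry:
		swapgs				/* user GS base <-> IA32_KERNEL_GS_BASE */
		movq	%gs:0, %rax		/* per-CPU field reachable via %gs */
		/* ... handle the trap ... */
		swapgs				/* restore the user GS base */
		iretq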