From: David Woodhouse <d...@amazon.co.uk>

Add a minimal 16-entry IDT and a set of exception stubs to the kexec
control page, and load them while running from the identity mapping.
Each stub pushes its vector number and jumps to a common handler, which
writes a character to the serial port at 0x3f8. A #BP (int3) returns to
the interrupted code; any other exception halts, so a fault during
relocation is at least visible rather than ending in a silent reset.

Signed-off-by: David Woodhouse <d...@amazon.co.uk>
---
 arch/x86/include/asm/kexec.h         |  5 ++
 arch/x86/kernel/machine_kexec_64.c   | 23 ++++++++
 arch/x86/kernel/relocate_kernel_64.S | 82 ++++++++++++++++++++++++++++
 3 files changed, 110 insertions(+)

diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 48e4f44f794f..c14b0a2704c0 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -8,6 +8,9 @@
 # define PA_PGD                        2
 # define PA_SWAP_PAGE          3
 # define PAGES_NR              4
+#else
+/* Size of each exception handler referenced by the IDT */
+# define KEXEC_DEBUG_EXC_HANDLER_SIZE  6 /* pushi, pushi, 2-byte jmp */
 #endif
 
 # define KEXEC_CONTROL_PAGE_SIZE       4096
@@ -58,6 +61,8 @@ struct kimage;
 extern unsigned long kexec_va_control_page;
 extern unsigned long kexec_pa_table_page;
 extern unsigned long kexec_pa_swap_page;
+extern gate_desc kexec_debug_idt[];
+extern unsigned char kexec_debug_exc_vectors[];
 #endif
 
 /*
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9232ad1562c8..f9a68c6ec815 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -304,6 +304,26 @@ static void load_segments(void)
                );
 }
 
+static void prepare_debug_idt(unsigned long control_page, unsigned long vec_ofs)
+{
+#ifdef CONFIG_KEXEC_DEBUG
+       gate_desc idtentry = { 0 };
+       int i;
+
+       idtentry.bits.p = 1;
+       idtentry.bits.type = GATE_TRAP;
+       idtentry.segment = __KERNEL_CS;
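+       /* The stubs start vec_ofs bytes into the copied control page */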
+       idtentry.offset_low = (control_page & 0xFFFF) + vec_ofs;
+       idtentry.offset_middle = (control_page >> 16) & 0xFFFF;
+       idtentry.offset_high = control_page >> 32;
+
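+       /* Point each of the 16 vectors at its own stub */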
+       for (i = 0; i < 16; i++) {
+               kexec_debug_idt[i] = idtentry;
+               idtentry.offset_low += KEXEC_DEBUG_EXC_HANDLER_SIZE;
+       }
+#endif
+}
+
 int machine_kexec_prepare(struct kimage *image)
 {
        void *control_page = page_address(image->control_code_page);
@@ -321,6 +341,9 @@ int machine_kexec_prepare(struct kimage *image)
        if (image->type == KEXEC_TYPE_DEFAULT)
                kexec_pa_swap_page = page_to_pfn(image->swap_page) << PAGE_SHIFT;
 
+       prepare_debug_idt((unsigned long)__pa(control_page),
+                         (unsigned long)kexec_debug_exc_vectors - reloc_start);
+
        __memcpy(control_page, __relocate_kernel_start, reloc_end - reloc_start);
 
        set_memory_rox((unsigned long)control_page, 1);
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 6c6bfdccfe6a..2179f691a45b 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -49,6 +49,12 @@ SYM_DATA_START_LOCAL(kexec_debug_gdt)
        .quad   0x00af9a000000ffff      /* __KERNEL_CS */
        .quad   0x00cf92000000ffff      /* __KERNEL_DS */
 SYM_DATA_END_LABEL(kexec_debug_gdt, SYM_L_LOCAL, kexec_debug_gdt_end)
+
+       .balign 8
+SYM_DATA_START(kexec_debug_idt)
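+       /* Room for 16 gate descriptors, filled in by prepare_debug_idt() */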
+       .skip 0x100, 0x00
+SYM_DATA_END(kexec_debug_idt)
+
 #endif /* CONFIG_KEXEC_DEBUG */
 
        .section .text.relocate_kernel,"ax";
@@ -108,6 +114,11 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
        int3
 SYM_CODE_END(relocate_kernel)
 
+#ifdef DEBUG
+       UNWIND_HINT_UNDEFINED
+       .balign 0x100   /* relocate_kernel will be overwritten with an IDT */
+#endif
+
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        UNWIND_HINT_END_OF_STACK
        /*
@@ -137,6 +148,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        /* Test that we can load segments */
        movq    %ds, %rax
        movq    %rax, %ds
+
+       /* Build an IDTR on the stack and load the IDT the kernel prepared */
+       leaq    kexec_debug_idt(%rip), %rsi
+       pushq   %rsi
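+       /* 16-bit limit: 16 gates * 16 bytes - 1 */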
+       pushw   $0xff
+       lidt    (%rsp)
+       addq    $10, %rsp
+
+       //int3
 #endif /* CONFIG_KEXEC_DEBUG */
 
        /*
@@ -346,3 +366,65 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        ret
        int3
 SYM_CODE_END(swap_pages)
+
+#ifdef CONFIG_KEXEC_DEBUG
+SYM_CODE_START_NOALIGN(kexec_debug_exc_vectors)
+       /* Each of these is 6 bytes. */
+.macro vec_err exc
+       UNWIND_HINT_ENTRY
+       . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
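+       /* The CPU pushed an error code; NOPs pad the stub to 6 bytes */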
+       nop
+       nop
+       pushq   $\exc
+       jmp     exc_handler
+.endm
+
+.macro vec_noerr exc
+       UNWIND_HINT_ENTRY
+       . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
+       pushq   $0
+       pushq   $\exc
+       jmp     exc_handler
+.endm
+
+       ANNOTATE_NOENDBR
+       vec_noerr 0 // #DE
+       vec_noerr 1 // #DB
+       vec_noerr 2 // #NMI
+       vec_noerr 3 // #BP
+       vec_noerr 4 // #OF
+       vec_noerr 5 // #BR
+       vec_noerr 6 // #UD
+       vec_noerr 7 // #NM
+       vec_err 8   // #DF
+       vec_noerr 9
+       vec_err 10 // #TS
+       vec_err 11 // #NP
+       vec_err 12 // #SS
+       vec_err 13 // #GP
+       vec_err 14 // #PF
+       vec_noerr 15
+SYM_CODE_END(kexec_debug_exc_vectors)
+
+SYM_CODE_START_LOCAL_NOALIGN(exc_handler)
+       pushq   %rax
+       pushq   %rdx
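+       /* Write 'A' to the serial port at 0x3f8 to show we took an exception */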
+       movw    $0x3f8, %dx
+       movb    $'A', %al
+       outb    %al, %dx
+       popq    %rdx
+       popq    %rax
+
+       /* Only return from int3 */
+       cmpq    $3, (%rsp)
+       jne     .Ldie
+
+       addq    $16, %rsp
+       iretq
+
+.Ldie:
+       hlt
+       jmp     .Ldie
+
+SYM_CODE_END(exc_handler)
+#endif /* CONFIG_KEXEC_DEBUG */
-- 
2.47.0

