On Intel, the APs are left in a well documented state after TXT performs
the late launch. Specifically, they cannot have #INIT asserted on them, so
a standard startup via INIT/SIPI/SIPI cannot be performed. Instead, the
early Secure Launch (SL) stub code parks the APs using MONITOR and MWAIT.
The realmode/init.c code then patches the jump address for the waiting APs
with the location of the Secure Launch entry point in the rmpiggy image.
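
For reference, the parking done by the early stub amounts to the following
rough C sketch. The real code is assembly in sl_stub.S and runs before the
kernel proper; sl_ap_park() and its jump_target argument are invented for
illustration, __monitor()/__mwait() stand in for the raw instructions, and
the monitor field is the one from struct sl_ap_stack_and_monitor used
elsewhere in this patch:

  /*
   * Illustrative only: the real per-AP parking loop is assembly in
   * sl_stub.S; names below that are not in this patch are made up.
   */
  #include <linux/compiler.h>     /* READ_ONCE(), WRITE_ONCE() */
  #include <linux/types.h>        /* u32 */
  #include <linux/slaunch.h>      /* struct sl_ap_stack_and_monitor */
  #include <asm/mwait.h>          /* __monitor(), __mwait() */

  static void sl_ap_park(struct sl_ap_stack_and_monitor *slot, u32 jump_target)
  {
          WRITE_ONCE(slot->monitor, 0);           /* nobody has woken us yet */

          while (!READ_ONCE(slot->monitor)) {
                  /* Arm MONITOR on this AP's monitor line. */
                  __monitor(&slot->monitor, 0, 0);

                  /* Re-check so a wakeup racing with arming is not lost. */
                  if (READ_ONCE(slot->monitor))
                          break;

                  /* MWAIT until the monitored line is written. */
                  __mwait(0, 0);
          }

          /*
           * Go to the 32-bit entry point the kernel patched into the wake
           * block (sl_trampoline_start32 after this patch); shown as an
           * indirect call instead of the long jump the stub performs.
           */
          ((void (*)(void))(unsigned long)jump_target)();
  }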

The rmpiggy is a payload embedded in the kernel image that is used to start
the APs (in 16-bit or 32-bit mode). Because it is relocated at runtime, the
running kernel must update the long jump used by the waiting APs with its
actual location and entry point.

When an AP is woken by a write to its MONITOR address, it jumps to the
Secure Launch entry point in the rmpiggy, which mimics what the real mode
code would do and then jumps to the standard rmpiggy protected mode entry
point.
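
To tie the pieces together, the AP wake block set up by the early stub is
used roughly as follows (offsets are the ones recorded in sl_ap_wake_info;
this is an illustration of the relationships, not an exact memory map):

  ap_wake_block                      AP parking/wake code placed by the early
                                     stub (sl_stub.S)
  ap_wake_block + ap_jmp_offset      32-bit jump target, patched by
                                     slaunch_fixup_ap_wake_vector() to point
                                     at sl_trampoline_start32 in the rmpiggy
  ap_wake_block + ap_stacks_offset   array of struct sl_ap_stack_and_monitor,
                                     one per AP; slaunch_wakeup_cpu_from_txt()
                                     writes the matching entry's monitor field
                                     to wake that AP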

Signed-off-by: Ross Philipson <[email protected]>
---
 arch/x86/include/asm/realmode.h      |  3 ++
 arch/x86/kernel/slaunch.c            | 26 +++++++++++++++
 arch/x86/kernel/smpboot.c            | 47 ++++++++++++++++++++++++++--
 arch/x86/realmode/init.c             |  8 +++++
 arch/x86/realmode/rm/header.S        |  3 ++
 arch/x86/realmode/rm/trampoline_64.S | 32 +++++++++++++++++++
 6 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index e406a1e92c63..e3336c49d26b 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -38,6 +38,9 @@ struct real_mode_header {
 #ifdef CONFIG_X86_64
        u32     machine_real_restart_seg;
 #endif
+#ifdef CONFIG_SECURE_LAUNCH
+       u32     sl_trampoline_start32;
+#endif
 };
 
 /* This must match data at realmode/rm/trampoline_{32,64}.S */
diff --git a/arch/x86/kernel/slaunch.c b/arch/x86/kernel/slaunch.c
index 6958734fe5e9..0699cbf41753 100644
--- a/arch/x86/kernel/slaunch.c
+++ b/arch/x86/kernel/slaunch.c
@@ -507,3 +507,29 @@ void __init slaunch_setup(void)
        if (boot_cpu_has(X86_FEATURE_SMX))
                slaunch_setup_txt();
 }
+
+/*
+ * After a launch, the APs are woken up, enter the DRTM and are left to
+ * wait for a wakeup call on a MONITOR address. The block where they are
+ * idle has a long jump to the AP startup code in the mainline kernel.
+ * This address has to be calculated at runtime and "fixed up" to point
+ * to the SL startup location in the rmpiggy SMP startup image. This image
+ * is loaded into separate memory at kernel start time.
+ */
+void __init slaunch_fixup_ap_wake_vector(void)
+{
+       struct sl_ap_wake_info *ap_wake_info;
+       u32 *ap_jmp_ptr;
+
+       if (!slaunch_is_txt_launch())
+               return;
+
+       ap_wake_info = slaunch_get_ap_wake_info();
+
+       ap_jmp_ptr = (u32 *)__va(ap_wake_info->ap_wake_block +
+                                ap_wake_info->ap_jmp_offset);
+
+       *ap_jmp_ptr = real_mode_header->sl_trampoline_start32;
+
+       pr_info("TXT AP startup vector address updated\n");
+}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eb289abece23..c351280b1cbc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -61,6 +61,7 @@
 #include <linux/cpuhotplug.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi.h>
+#include <linux/slaunch.h>
 
 #include <asm/acpi.h>
 #include <asm/cacheinfo.h>
@@ -833,6 +834,45 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
        return 0;
 }
 
+#if (IS_ENABLED(CONFIG_SECURE_LAUNCH))
+
+/*
+ * TXT AP startup is quite different than normal. The APs cannot have #INIT
+ * asserted on them or receive SIPIs. The early Secure Launch code has parked
+ * the APs using MONITOR/MWAIT in the safe AP wake block area (details in
+ * sl_stub.S). The SMP boot will wake the APs by writing the MONITOR associated
+ * with the AP and have them jump to the protected mode code in the rmpiggy
+ * where the rest of the SMP boot of the AP will proceed normally.
+ *
+ * Intel Trusted Execution Technology (TXT) Software Development Guide
+ * Section 2.3 -  MLE Initialization
+ */
+static void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
+{
+       struct sl_ap_stack_and_monitor *stack_monitor;
+       struct sl_ap_wake_info *ap_wake_info;
+
+       ap_wake_info = slaunch_get_ap_wake_info();
+
+       stack_monitor = (struct sl_ap_stack_and_monitor *)__va(ap_wake_info->ap_wake_block +
+                                                               ap_wake_info->ap_stacks_offset);
+
+       for (int i = SL_MAX_CPUS - 1; i >= 0; i--) {
+               if (stack_monitor[i].apicid == apicid) {
+                       stack_monitor[i].monitor = 1;
+                       break;
+               }
+       }
+}
+
+#else
+
+static inline void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
+{
+}
+
+#endif  /* IS_ENABLED(CONFIG_SECURE_LAUNCH) */
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -842,7 +882,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
 {
        unsigned long start_ip = real_mode_header->trampoline_start;
-       int ret;
+       int ret = 0;
 
 #ifdef CONFIG_X86_64
        /* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
@@ -887,12 +927,15 @@ static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
 
        /*
         * Wake up a CPU in difference cases:
+        * - Intel TXT DRTM launch uses its own method to wake the APs
         * - Use a method from the APIC driver if one defined, with wakeup
         *   straight to 64-bit mode preferred over wakeup to RM.
         * Otherwise,
         * - Use an INIT boot APIC message
         */
-       if (apic->wakeup_secondary_cpu_64)
+       if (slaunch_is_txt_launch())
+               slaunch_wakeup_cpu_from_txt(cpu, apicid);
+       else if (apic->wakeup_secondary_cpu_64)
                ret = apic->wakeup_secondary_cpu_64(apicid, start_ip, cpu);
        else if (apic->wakeup_secondary_cpu)
                ret = apic->wakeup_secondary_cpu(apicid, start_ip, cpu);
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 88be32026768..ded4dafc6a0a 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -4,6 +4,7 @@
 #include <linux/memblock.h>
 #include <linux/cc_platform.h>
 #include <linux/pgtable.h>
+#include <linux/slaunch.h>
 
 #include <asm/set_memory.h>
 #include <asm/realmode.h>
@@ -213,6 +214,13 @@ void __init init_real_mode(void)
 
        setup_real_mode();
        set_real_mode_permissions();
+
+       /*
+        * If Secure Launch is active, it will use the rmpiggy to do the TXT AP
+        * startup. Secure Launch has its own entry stub in the rmpiggy and
+        * this prepares it for SMP boot.
+        */
+       slaunch_fixup_ap_wake_vector();
 }
 
 static int __init do_init_real_mode(void)
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
index 2eb62be6d256..3b5cbcbbfc90 100644
--- a/arch/x86/realmode/rm/header.S
+++ b/arch/x86/realmode/rm/header.S
@@ -37,6 +37,9 @@ SYM_DATA_START(real_mode_header)
 #ifdef CONFIG_X86_64
        .long   __KERNEL32_CS
 #endif
+#ifdef CONFIG_SECURE_LAUNCH
+       .long   pa_sl_trampoline_start32
+#endif
 SYM_DATA_END(real_mode_header)
 
        /* End signature, used to verify integrity */
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 14d9c7daf90f..b0ce6205d7ea 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -122,6 +122,38 @@ SYM_CODE_END(sev_es_trampoline_start)
 
        .section ".text32","ax"
        .code32
+#ifdef CONFIG_SECURE_LAUNCH
+       .balign 4
+SYM_CODE_START(sl_trampoline_start32)
+       /*
+        * The early secure launch stub AP wakeup code has taken care of all
+        * the vagaries of launching out of TXT. This bit just mimics what the
+        * 16b entry code does and jumps off to the real startup_32.
+        */
+       cli
+       wbinvd
+
+       /*
+        * The %ebx provided is not terribly useful since it is the physical
+        * address of tb_trampoline_start and not the base of the image.
+        * Use pa_real_mode_base, which is fixed up, to get a run time
+        * base register to use for offsets to locations that do not have
+        * pa_ symbols.
+        */
+       movl    $pa_real_mode_base, %ebx
+
+       LOCK_AND_LOAD_REALMODE_ESP lock_pa=1
+
+       lgdt    tr_gdt(%ebx)
+       lidt    tr_idt(%ebx)
+
+       movw    $__KERNEL_DS, %dx       # Data segment descriptor
+
+       /* Jump to where the 16b code would have jumped */
+       ljmpl   $__KERNEL32_CS, $pa_startup_32
+SYM_CODE_END(sl_trampoline_start32)
+#endif
+
        .balign 4
 SYM_CODE_START(startup_32)
        movl    %edx, %ss
-- 
2.43.7

