All APs use the same common stack for initialization. After initialization, each AP should switch to a stack of its own. The sequence is: 1. BSP detects the AP count 2. BSP allocates the AP stack space 3. AP grabs a lock 4. AP performs SwitchStack 5. AP releases the lock
Contributed-under: TianoCore Contribution Agreement 1.0 Signed-off-by: Chen Fan <[email protected]> --- UefiCpuPkg/CpuDxe/CpuMp.c | 41 ++++++++++++++++++++++++--- UefiCpuPkg/CpuDxe/CpuMp.h | 22 +++++++++++++++ UefiCpuPkg/CpuDxe/Ia32/MpAsm.S | 53 +++++++++++++++++++++++++++++++++- UefiCpuPkg/CpuDxe/X64/MpAsm.S | 64 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 174 insertions(+), 6 deletions(-) diff --git a/UefiCpuPkg/CpuDxe/CpuMp.c b/UefiCpuPkg/CpuDxe/CpuMp.c index eda70e3..2f3516b 100644 --- a/UefiCpuPkg/CpuDxe/CpuMp.c +++ b/UefiCpuPkg/CpuDxe/CpuMp.c @@ -17,9 +17,29 @@ VOID *mCommonStack = 0; VOID *mTopOfApCommonStack = 0; +VOID *mApStackStart = 0; +UINTN mApStackSize = 0; +volatile UINTN mIndexOfProcessors; UINTN mNumberOfProcessors; BOOLEAN mAllApsInitFinished = FALSE; +UINTN mApDoneCount = 0; + +VOID +ProcessorToIdleState ( + IN VOID *Context1 OPTIONAL, + IN VOID *context2 OPTIONAL + ) +{ + DEBUG ((DEBUG_INFO, "detect Apic id: %d\n", + GetApicId())); + + mIndexOfProcessors++; + mApDoneCount++; + AsmApReleaseLock (); + + CpuDeadLoop (); +} VOID EFIAPI @@ -33,6 +53,16 @@ ApEntryPointInC ( /* Wait for all Aps complete to initialization */ while (!mAllApsInitFinished); + /* Switch to stack of the Ap's own */ + AsmApSwitchStack ( + (SWITCH_STACK_ENTRY_POINT)(UINTN) ProcessorToIdleState, + NULL, + NULL, + mApStackStart + ); + + /* never be here */ + ASSERT (FALSE); CpuDeadLoop (); } @@ -42,13 +72,14 @@ InitializeMpSupport ( VOID ) { - mCommonStack = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_64KB)); - mTopOfApCommonStack = (VOID*) ((UINTN)mCommonStack + SIZE_64KB); + mCommonStack = AllocatePages (EFI_SIZE_TO_PAGES (AP_STACK_SIZE)); + mTopOfApCommonStack = (VOID*) ((UINTN)mCommonStack + AP_STACK_SIZE); if (mCommonStack == NULL) { return; } mNumberOfProcessors = 1; + mIndexOfProcessors = 1; StartApsStackless (AsmApEntryPoint); @@ -59,14 +90,16 @@ InitializeMpSupport ( } DEBUG ((DEBUG_ERROR, "Detect CPU count: %d\n", mNumberOfProcessors)); + mApStackStart = 
AllocatePages (EFI_SIZE_TO_PAGES ((mNumberOfProcessors - 1) * AP_STACK_SIZE)); + mApStackSize = AP_STACK_SIZE; mAllApsInitFinished = TRUE; - CpuDeadLoop (); + while (mApDoneCount != (mNumberOfProcessors - 1)); EXIT: mTopOfApCommonStack = NULL; - FreePages (mCommonStack, EFI_SIZE_TO_PAGES (SIZE_64KB)); + FreePages (mCommonStack, EFI_SIZE_TO_PAGES (AP_STACK_SIZE)); mCommonStack = NULL; } diff --git a/UefiCpuPkg/CpuDxe/CpuMp.h b/UefiCpuPkg/CpuDxe/CpuMp.h index 35394f7..69731f7 100644 --- a/UefiCpuPkg/CpuDxe/CpuMp.h +++ b/UefiCpuPkg/CpuDxe/CpuMp.h @@ -15,6 +15,7 @@ #ifndef _CPU_MP_H_ #define _CPU_MP_H_ +#define AP_STACK_SIZE SIZE_64KB #define STALL_100_MILLI_SECOND (1000 * 100) VOID InitializeMpSupport ( @@ -44,5 +45,26 @@ AsmApDoneWithCommonStack ( VOID ); +VOID +EFIAPI +AsmApDoneWithCommonStack ( + VOID + ); + +VOID +EFIAPI +AsmApSwitchStack ( + IN SWITCH_STACK_ENTRY_POINT EntryPoint, + IN VOID *Context1, OPTIONAL + IN VOID *Context2, OPTIONAL + IN VOID *NewStack + ); + +VOID +EFIAPI +AsmApReleaseLock ( + VOID + ); + #endif // _CPU_MP_H_ diff --git a/UefiCpuPkg/CpuDxe/Ia32/MpAsm.S b/UefiCpuPkg/CpuDxe/Ia32/MpAsm.S index bc42c38..4a41ae2 100644 --- a/UefiCpuPkg/CpuDxe/Ia32/MpAsm.S +++ b/UefiCpuPkg/CpuDxe/Ia32/MpAsm.S @@ -10,7 +10,7 @@ # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
# # - +#include "CpuMp.h" # # point to the external interrupt vector table @@ -61,3 +61,54 @@ ASM_PFX(AsmApDoneWithCommonStack): lock btcl $0, ApStackLock ret +#------------------------------------------------------------------------------ +# VOID +# EFIAPI +# AsmApSwitchStack ( +# IN SWITCH_STACK_ENTRY_POINT EntryPoint, +# IN VOID *Context1, OPTIONAL +# IN VOID *Context2, OPTIONAL +# IN VOID *NewStack +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(AsmApSwitchStack) +ASM_PFX(AsmApSwitchStack): + pushl %ebp + movl %esp, %ebp + +AsmApAquireStackLock: +lock btsl $0, ApStackLock + pause + jc AsmApAquireStackLock + + # calculate the new stack top address of each AP's: + # %rsp = NewStack + AP_STACK_SIZE * mIndexOfProcessors + # + movl $AP_STACK_SIZE, %edi + movl ASM_PFX(mIndexOfProcessors), %ebx + imul %edi, %ebx + movl 20(%ebp), %eax + addl %ebx, %eax + + movl %eax, %esp # switch stack + subl $8, %esp + + movl 16(%ebp), %eax + movl %eax, 4(%esp) + movl 12(%ebp), %eax + movl %eax, (%esp) + pushl $0 # keeps gdb from unwinding stack + jmp *8(%ebp) # call and never return + +#------------------------------------------------------------------------------ +# VOID +# EFIAPI +# AsmApReleaseLock ( +# VOID +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(AsmApReleaseLock) +ASM_PFX(AsmApReleaseLock): +lock btcl $0, ApStackLock + ret + diff --git a/UefiCpuPkg/CpuDxe/X64/MpAsm.S b/UefiCpuPkg/CpuDxe/X64/MpAsm.S index bf488d3..e6da2d0 100644 --- a/UefiCpuPkg/CpuDxe/X64/MpAsm.S +++ b/UefiCpuPkg/CpuDxe/X64/MpAsm.S @@ -10,7 +10,7 @@ # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
# # - +#include "CpuMp.h" # # point to the external interrupt vector table @@ -61,3 +61,65 @@ ASM_PFX(AsmApDoneWithCommonStack): lock btcl $0, ApStackLock ret +#------------------------------------------------------------------------------ +# VOID +# EFIAPI +# AsmApSwitchStack ( +# IN SWITCH_STACK_ENTRY_POINT EntryPoint, +# IN VOID *Context1, OPTIONAL +# IN VOID *Context2, OPTIONAL +# IN VOID *NewStack +# ); +# Arguments: +# +# (rcx) EntryPoint - Entry point with new stack. +# (rdx) Context1 - Parameter1 for entry point. +# (r8) Context2 - Parameter2 for entry point. +# (r9) NewStack - The pointer to new stack. +# +# Returns: +# +# None +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(AsmApSwitchStack) +ASM_PFX(AsmApSwitchStack): + pushq %rbp + movq %rsp, %rbp + +AsmApAquireStackLock: +lock btsl $0, ApStackLock + pause + jc AsmApAquireStackLock + + mov %rcx, %rax + mov %rdx, %rcx + mov %r8, %rdx + + # calculate the new stack top address of each AP's: + # %rsp = NewStack + AP_STACK_SIZE * mIndexOfProcessors + # + movl $AP_STACK_SIZE, %edi + movl ASM_PFX(mIndexOfProcessors), %ebx + imul %edi, %ebx + addq %rbx, %r9 + # + # Reserve space for register parameters (rcx, rdx, r8 & r9) on the stack, + # in case the caller wishes to spill them. + # + lea -0x20(%r9), %rsp + pushq $0 # keeps gdb from unwinding stack + + jmp *%rax # call EntryPoint () + +#------------------------------------------------------------------------------ +# VOID +# EFIAPI +# AsmApReleaseLock ( +# VOID +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(AsmApReleaseLock) +ASM_PFX(AsmApReleaseLock): +lock btcl $0, ApStackLock + ret + -- 1.9.3 ------------------------------------------------------------------------------ Want excitement? Manually upgrade your production database. When you want reliability, choose Perforce Perforce version control. Predictably reliable. 
http://pubads.g.doubleclick.net/gampad/clk?id=157508191&iu=/4140/ostg.clktrk _______________________________________________ edk2-devel mailing list [email protected] https://lists.sourceforge.net/lists/listinfo/edk2-devel
