Re: [PATCHv2 13/13] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method

2023-11-01 Thread Kirill A. Shutemov
On Sun, Oct 29, 2023 at 06:31:36PM +0100, Thomas Gleixner wrote:
> On Fri, Oct 20 2023 at 18:12, Kirill A. Shutemov wrote:
> 
> > MADT Multiprocessor Wakeup structure version 1 brings support of CPU
> > offlining: BIOS provides a reset vector where the CPU has to jump to
> > offline itself. The new TEST mailbox command can be used to test the CPU
> > offlined successfully and BIOS has control over it.
> >
> > Add CPU offling support for ACPI MADT wakeup method by implementing
> > custom cpu_die, play_dead and stop_other_cpus SMP operations.
> >
> > CPU offlining makes possible to hand over secondary CPUs over kexec, not
> 
> makes it possible
> 
> > limiting the second kernel with single CPU.
> 
> s/with/to/

Okay.

> > The change conforms to the approved ACPI spec change proposal. See the
> > +SYM_FUNC_START(asm_acpi_mp_play_dead)
> > +   /* Load address of reset vector into RCX to jump when kernel is ready */
> > +   movq    acpi_mp_reset_vector_paddr(%rip), %rcx
> > +
> > +   /* Turn off global entries. Following CR3 write will flush them. */
> > +   movq    %cr4, %rdx
> > +   andq    $~(X86_CR4_PGE), %rdx
> > +   movq    %rdx, %cr4
> > +
> > +   /* Switch to identity mapping */
> > +   movq    acpi_mp_pgd(%rip), %rax
> > +   movq    %rax, %cr3
> 
> You can just make this function:
> 
> asm_acpi_mp_play_dead(u64 reset_vector, u64 pgd_pa);
> 
> then you have the reset vector in RDI and the pgd in RSI and spare the
> global variables.

Yeah, it is better. Thanks.

> >  /* Physical address of the Multiprocessor Wakeup Structure mailbox */
> > @@ -11,6 +16,150 @@ static u64 acpi_mp_wake_mailbox_paddr;
> >  /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
> >  static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
> >  
> > +u64 acpi_mp_pgd;
> > +u64 acpi_mp_reset_vector_paddr;
> 
> See above (static) and __ro_after_init please
> 
> > +
> > +void asm_acpi_mp_play_dead(void);
> > +
> > +static void __init *alloc_pgt_page(void *context)
> > +{
> 
> What's the purpose of the context argument?

To conform to x86_mapping_info::alloc_pgt_page type.

I will rename the argument to 'dummy' and add comment.

> > +   return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> > +}
> > +
> > +/*
> > + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> > + * the same place as in the kernel page tables. The function switches to
> > + * the identity mapping and has be present at the same spot in before and
> > + * after transition.
> 
> Why does it need to be there after the CPU jumped to the reset vector?

After transition to the identity mapping, not after jumping to reset
vector. I will adjust the comment.

> > + */
> > +static int __init init_transition_pgtable(pgd_t *pgd)
> > +{
> > +   pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
> > +   unsigned long vaddr, paddr;
> > +   int result = -ENOMEM;
> > +   p4d_t *p4d;
> > +   pud_t *pud;
> > +   pmd_t *pmd;
> > +   pte_t *pte;
> > +
> > +   vaddr = (unsigned long)asm_acpi_mp_play_dead;
> > +   pgd += pgd_index(vaddr);
> > +   if (!pgd_present(*pgd)) {
> > +   p4d = (p4d_t *)alloc_pgt_page(NULL);
> > +   if (!p4d)
> > +   goto err;
> 
> return -ENOMEM?
> 
> the error label is pretty silly without an actual cleanup, right?

Right.

> > +   set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
> > +   }
> > +   p4d = p4d_offset(pgd, vaddr);
> > +   if (!p4d_present(*p4d)) {
> > +   pud = (pud_t *)alloc_pgt_page(NULL);
> > +   if (!pud)
> > +   goto err;
> 
> Ditto. But what mops up the already allocated page above?

Oops. I will add cleanup in acpi_mp_setup_reset() if
kernel_ident_mapping_init() or init_transition_pgtable() fails.

> > +   set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
> > +
> > +   return 0;
> > +err:
> > +   return result;
> > +}
> 
> Can you please move that function to the place where it is used?

Sure.

> > +
> > +static void acpi_mp_play_dead(void)
> > +{
> > +   play_dead_common();
> > +   asm_acpi_mp_play_dead();
> > +}
> > +
> > +static void acpi_mp_cpu_die(unsigned int cpu)
> > +{
> > +   int apicid = per_cpu(x86_cpu_to_apicid, cpu);
> 
> u32 apicid

Okay.

> > +   unsigned long timeout;
> > +
> > +   /*
> > +* Use TEST mailbox command to prove that BIOS got control over
> > +* the CPU before declaring it dead.
> > +*
> > +* BIOS has to clear 'command' field of the mailbox.
> > +*/
> > +   acpi_mp_wake_mailbox->apic_id = apicid;
> > +   smp_store_release(&acpi_mp_wake_mailbox->command,
> > + ACPI_MP_WAKE_COMMAND_TEST);
> > +
> > +   /* Don't wait longer than a second. */
> > +   timeout = USEC_PER_SEC;
> > +   while (READ_ONCE(acpi_mp_wake_mailbox->command) && timeout--)
> > +   udelay(1);
> > +}
> > +
> > +static void acpi_mp_stop_other_cpus(int wait)
> > +{
> > +   smp_shutdown_nonboot_cpus(smp_processor_id());
> 
> This clearly was never tested with lockdep. At the point where

Re: [PATCHv2 13/13] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method

2023-10-29 Thread Thomas Gleixner
On Fri, Oct 20 2023 at 18:12, Kirill A. Shutemov wrote:

> MADT Multiprocessor Wakeup structure version 1 brings support of CPU
> offlining: BIOS provides a reset vector where the CPU has to jump to
> offline itself. The new TEST mailbox command can be used to test the CPU
> offlined successfully and BIOS has control over it.
>
> Add CPU offling support for ACPI MADT wakeup method by implementing
> custom cpu_die, play_dead and stop_other_cpus SMP operations.
>
> CPU offlining makes possible to hand over secondary CPUs over kexec, not

makes it possible

> limiting the second kernel with single CPU.

s/with/to/

> The change conforms to the approved ACPI spec change proposal. See the
> +SYM_FUNC_START(asm_acpi_mp_play_dead)
> + /* Load address of reset vector into RCX to jump when kernel is ready */
> + movq    acpi_mp_reset_vector_paddr(%rip), %rcx
> +
> + /* Turn off global entries. Following CR3 write will flush them. */
> + movq    %cr4, %rdx
> + andq    $~(X86_CR4_PGE), %rdx
> + movq    %rdx, %cr4
> +
> + /* Switch to identity mapping */
> + movq    acpi_mp_pgd(%rip), %rax
> + movq    %rax, %cr3

You can just make this function:

asm_acpi_mp_play_dead(u64 reset_vector, u64 pgd_pa);

then you have the reset vector in RDI and the pgd in RSI and spare the
global variables.

>  
>  /* Physical address of the Multiprocessor Wakeup Structure mailbox */
> @@ -11,6 +16,150 @@ static u64 acpi_mp_wake_mailbox_paddr;
>  /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
>  static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
>  
> +u64 acpi_mp_pgd;
> +u64 acpi_mp_reset_vector_paddr;

See above (static) and __ro_after_init please

> +
> +void asm_acpi_mp_play_dead(void);
> +
> +static void __init *alloc_pgt_page(void *context)
> +{

What's the purpose of the context argument?

> + return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +}
> +
> +/*
> + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> + * the same place as in the kernel page tables. The function switches to
> + * the identity mapping and has be present at the same spot in before and
> + * after transition.

Why does it need to be there after the CPU jumped to the reset vector?

> + */
> +static int __init init_transition_pgtable(pgd_t *pgd)
> +{
> + pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
> + unsigned long vaddr, paddr;
> + int result = -ENOMEM;
> + p4d_t *p4d;
> + pud_t *pud;
> + pmd_t *pmd;
> + pte_t *pte;
> +
> + vaddr = (unsigned long)asm_acpi_mp_play_dead;
> + pgd += pgd_index(vaddr);
> + if (!pgd_present(*pgd)) {
> + p4d = (p4d_t *)alloc_pgt_page(NULL);
> + if (!p4d)
> + goto err;

return -ENOMEM?

the error label is pretty silly without an actual cleanup, right?

> + set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
> + }
> + p4d = p4d_offset(pgd, vaddr);
> + if (!p4d_present(*p4d)) {
> + pud = (pud_t *)alloc_pgt_page(NULL);
> + if (!pud)
> + goto err;

Ditto. But what mops up the already allocated page above?
> + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
> +
> + return 0;
> +err:
> + return result;
> +}

Can you please move that function to the place where it is used?

> +
> +static void acpi_mp_play_dead(void)
> +{
> + play_dead_common();
> + asm_acpi_mp_play_dead();
> +}
> +
> +static void acpi_mp_cpu_die(unsigned int cpu)
> +{
> + int apicid = per_cpu(x86_cpu_to_apicid, cpu);

u32 apicid

> + unsigned long timeout;
> +
> + /*
> +  * Use TEST mailbox command to prove that BIOS got control over
> +  * the CPU before declaring it dead.
> +  *
> +  * BIOS has to clear 'command' field of the mailbox.
> +  */
> + acpi_mp_wake_mailbox->apic_id = apicid;
> + smp_store_release(&acpi_mp_wake_mailbox->command,
> +   ACPI_MP_WAKE_COMMAND_TEST);
> +
> + /* Don't wait longer than a second. */
> + timeout = USEC_PER_SEC;
> + while (READ_ONCE(acpi_mp_wake_mailbox->command) && timeout--)
> + udelay(1);
> +}
> +
> +static void acpi_mp_stop_other_cpus(int wait)
> +{
> + smp_shutdown_nonboot_cpus(smp_processor_id());

This clearly was never tested with lockdep. At the point where
stop_other_cpus() is invoked the invoking CPU has interrupts disabled...

> +}
> +
> +static void acpi_mp_crash_stop_other_cpus(void)
> +{
> + smp_shutdown_nonboot_cpus(smp_processor_id());

Yuck. Crash can happen at arbitrary places. So you really cannot invoke
the whole CPU hotplug state machine from here.

There is a reason why the other implementation just kicks CPUs into some
"safe" state.

> + /* The kernel is broken so disable interrupts */
> + local_irq_disable();
> +}
> +
> +static int __init acpi_mp_setup_reset(u64 reset_vector)
> +{
> + pgd_t *pgd;
> + struct x86_mapping_info info = {
> + 

Re: [PATCHv2 13/13] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method

2023-10-27 Thread kirill.shute...@linux.intel.com
On Tue, Oct 24, 2023 at 10:11:58AM +, Huang, Kai wrote:
> 
> > --- /dev/null
> > +++ b/arch/x86/kernel/acpi/madt.S
> 
> I think the name 'madt.S' is too generic.  How about something more 
> specific
> such as madt_reset.S, or madt_playdead.S, etc? 

Okay, madt_playdead.S sounds good.

> > @@ -11,6 +16,150 @@ static u64 acpi_mp_wake_mailbox_paddr;
> >  /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
> >  static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
> >  
> > +u64 acpi_mp_pgd;
> > +u64 acpi_mp_reset_vector_paddr;
> > +
> > +void asm_acpi_mp_play_dead(void);
> > +
> > +static void __init *alloc_pgt_page(void *context)
> > +{
> > +   return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> > +}
> > +
> > +/*
> > + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> > + * the same place as in the kernel page tables. The function switches to
> > + * the identity mapping 
> > 
> 
> This function itself doesn't switch to the identity mapping.  It just creates
the kernel mapping for asm_acpi_mp_play_dead() in the identity mapping page
table.

By "The function" I meant asm_acpi_mp_play_dead(). Yeah, it is not clear.

Will do s/The function/asm_acpi_mp_play_dead()/

> > -   cpu_hotplug_disable_offlining();
> > +   if (mp_wake->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1 &&
> > +   mp_wake->header.length >= ACPI_MADT_MP_WAKEUP_SIZE_V1) {
> > +   acpi_mp_setup_reset(mp_wake->reset_vector);
> 
> It's better to fallback to "disable offline" if this function fails.
> 

Okay, will warn to disable offlining.

-- 
  Kiryl Shutsemau / Kirill A. Shutemov

___
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec


Re: [PATCHv2 13/13] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method

2023-10-24 Thread Huang, Kai


> > +   .text
> > +   .align PAGE_SIZE
> > +SYM_FUNC_START(asm_acpi_mp_play_dead)
> > +   /* Load address of reset vector into RCX to jump when kernel is ready */
> > +   movq    acpi_mp_reset_vector_paddr(%rip), %rcx
> > +
> > +   /* Turn off global entries. Following CR3 write will flush them. */
> > +   movq    %cr4, %rdx
> > +   andq    $~(X86_CR4_PGE), %rdx
> > +   movq    %rdx, %cr4
> > +
> > +   /* Switch to identity mapping */
> > +   movq    acpi_mp_pgd(%rip), %rax
> > +   movq    %rax, %cr3
> 
> Do we need to switch back to kernel direct-map page table after CPU is wake up
> again?  We do support normal CPU offline/online, but not limited to kexec,
> right?

Please ignore this.  I found if I am reading right even for TDX guest the new
online cpu will start with trampoline_start64 assembly, so it will load kernel
page table anyway.  Sorry for the noise.

[...]


> > +   for (int i = 0; i < nr_pfn_mapped; i++) {
> > +   unsigned long mstart, mend;
> > +   mstart = pfn_mapped[i].start << PAGE_SHIFT;
> > +   mend   = pfn_mapped[i].end << PAGE_SHIFT;
> > +   if (kernel_ident_mapping_init(&info, pgd, mstart, mend))
> > +   return -ENOMEM;
> > +   }
> 
> This is for kexec() IIUC.  Add a comment?
> 
> If we consider normal CPU offline/online case, then I don't think we need the
> identity mapping for all memory?
> 

Also this one. :-)

> 

___
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec


Re: [PATCHv2 13/13] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method

2023-10-24 Thread Huang, Kai

> --- /dev/null
> +++ b/arch/x86/kernel/acpi/madt.S

I think the name 'madt.S' is too generic.  How about something more specific
such as madt_reset.S, or madt_playdead.S, etc? 

> @@ -0,0 +1,24 @@
> +#include <linux/linkage.h>
> +#include <asm/nospec-branch.h>
> +#include <asm/page.h>
> +#include <asm/processor-flags.h>
> +
> + .text
> + .align PAGE_SIZE
> +SYM_FUNC_START(asm_acpi_mp_play_dead)
> + /* Load address of reset vector into RCX to jump when kernel is ready */
> + movq    acpi_mp_reset_vector_paddr(%rip), %rcx
> +
> + /* Turn off global entries. Following CR3 write will flush them. */
> + movq    %cr4, %rdx
> + andq    $~(X86_CR4_PGE), %rdx
> + movq    %rdx, %cr4
> +
> + /* Switch to identity mapping */
> + movq    acpi_mp_pgd(%rip), %rax
> + movq    %rax, %cr3

Do we need to switch back to kernel direct-map page table after CPU is wake up
again?  We do support normal CPU offline/online, but not limited to kexec,
right?

> +
> + /* Jump to reset vector */
> + ANNOTATE_RETPOLINE_SAFE
> + jmp *%rcx
> +SYM_FUNC_END(asm_acpi_mp_play_dead)
> diff --git a/arch/x86/kernel/acpi/madt_wakeup.c 
> b/arch/x86/kernel/acpi/madt_wakeup.c
> index ad170def2367..f9ff14ee2892 100644
> --- a/arch/x86/kernel/acpi/madt_wakeup.c
> +++ b/arch/x86/kernel/acpi/madt_wakeup.c
> @@ -1,8 +1,13 @@
>  #include <linux/acpi.h>
>  #include <linux/cpu.h>
> +#include <linux/delay.h>
>  #include <linux/io.h>
> +#include <linux/kexec.h>
> +#include <linux/memblock.h>
> +#include <linux/pgtable.h>
>  #include <linux/sched/hotplug.h>
>  #include <asm/apic.h>
> +#include <asm/barrier.h>
>  #include <asm/processor.h>
>  
>  /* Physical address of the Multiprocessor Wakeup Structure mailbox */
> @@ -11,6 +16,150 @@ static u64 acpi_mp_wake_mailbox_paddr;
>  /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
>  static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
>  
> +u64 acpi_mp_pgd;
> +u64 acpi_mp_reset_vector_paddr;
> +
> +void asm_acpi_mp_play_dead(void);
> +
> +static void __init *alloc_pgt_page(void *context)
> +{
> + return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +}
> +
> +/*
> + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> + * the same place as in the kernel page tables. The function switches to
> + * the identity mapping 
> 

This function itself doesn't switch to the identity mapping.  It just creates
the kernel mapping for asm_acpi_mp_play_dead() in the identity mapping page
table.

> and has be present at the same spot in before and
> + * after transition.

This part doesn't parse to me.  I guess the whole comment can be:

asm_acpi_mp_play_dead() is accessed both before and after switching to 
the identity mapping.  Also map it at the kernel virtual address in
the identity mapping table.

Or perhaps even better, put the above comments to the place where this function
is called?

> + */
> +static int __init init_transition_pgtable(pgd_t *pgd)
> +{
> + pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
> + unsigned long vaddr, paddr;
> + int result = -ENOMEM;
> + p4d_t *p4d;
> + pud_t *pud;
> + pmd_t *pmd;
> + pte_t *pte;
> +
> + vaddr = (unsigned long)asm_acpi_mp_play_dead;
> + pgd += pgd_index(vaddr);
> + if (!pgd_present(*pgd)) {
> + p4d = (p4d_t *)alloc_pgt_page(NULL);
> + if (!p4d)
> + goto err;
> + set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
> + }
> + p4d = p4d_offset(pgd, vaddr);
> + if (!p4d_present(*p4d)) {
> + pud = (pud_t *)alloc_pgt_page(NULL);
> + if (!pud)
> + goto err;
> + set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
> + }
> + pud = pud_offset(p4d, vaddr);
> + if (!pud_present(*pud)) {
> + pmd = (pmd_t *)alloc_pgt_page(NULL);
> + if (!pmd)
> + goto err;
> + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
> + }
> + pmd = pmd_offset(pud, vaddr);
> + if (!pmd_present(*pmd)) {
> + pte = (pte_t *)alloc_pgt_page(NULL);
> + if (!pte)
> + goto err;
> + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
> + }
> + pte = pte_offset_kernel(pmd, vaddr);
> +
> + paddr = __pa(vaddr);
> + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
> +
> + return 0;
> +err:
> + return result;
> +}
> +
> +static void acpi_mp_play_dead(void)
> +{
> + play_dead_common();
> + asm_acpi_mp_play_dead();
> +}
> +
> +static void acpi_mp_cpu_die(unsigned int cpu)
> +{
> + int apicid = per_cpu(x86_cpu_to_apicid, cpu);
> + unsigned long timeout;
> +
> + /*
> +  * Use TEST mailbox command to prove that BIOS got control over
> +  * the CPU before declaring it dead.
> +  *
> +  * BIOS has to clear 'command' field of the mailbox.
> +  */
> + acpi_mp_wake_mailbox->apic_id = apicid;
> + smp_store_release(&acpi_mp_wake_mailbox->command,
> +   ACPI_MP_WAKE_COMMAND_TEST);
> +
> + /* Don't wait longer than a second. */
> + timeout = USEC_PER_SEC;
> + while