On 18.07.2023 22:20, Shawn Anastasio wrote:
> +void __init boot_of_init(unsigned long vec)
> +{
> +    int bof_chosen;
> +
> +    of_vec = vec;
> +
> +    /* Get a handle to the default console */
> +    bof_chosen = of_finddevice("/chosen");
> +    of_getprop(bof_chosen, "stdout", &of_out, sizeof(of_out));
> +    of_out = be32_to_cpu(of_out);

Can any of these fail, and hence lead to ...

> +    early_printk_init(of_putchar);

... this better not getting invoked?
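
For instance (just a sketch; OF_FAILURE is purely illustrative, standing in
for whatever error indicator these wrappers use, presumably the usual -1 of
the OF client interface):

    bof_chosen = of_finddevice("/chosen");
    if ( bof_chosen == OF_FAILURE ||
         of_getprop(bof_chosen, "stdout", &of_out,
                    sizeof(of_out)) == OF_FAILURE )
        return; /* leave the early console uninitialized */

    of_out = be32_to_cpu(of_out);
    early_printk_init(of_putchar);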

> --- a/xen/arch/ppc/ppc64/asm-offsets.c
> +++ b/xen/arch/ppc/ppc64/asm-offsets.c
> @@ -0,0 +1,59 @@
> +/*
> + * Generate definitions needed by assembly language modules.
> + * This code generates raw asm output which is post-processed
> + * to extract and format the required data.
> + */
> +
> +#include <asm/processor.h>
> +
> +#define DEFINE(_sym, _val)                                                 \
> +    asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
> +                  : : "i" (_val) )

Nit: There's a blank missing after the opening paren, which will then want
the 2nd line to be indented by one more character. (Instead, as a matter of
your taste, you may omit the blank between the two colons.)
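
I.e.:

#define DEFINE(_sym, _val)                                                  \
    asm volatile ( "\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
                   : : "i" (_val) )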

> +#define BLANK()                                                            \
> +    asm volatile ( "\n.ascii\"==><==\"" : : )
> +#define OFFSET(_sym, _str, _mem)                                           \
> +    DEFINE(_sym, offsetof(_str, _mem));
> +
> +/* base-2 logarithm */
> +#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
> +#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
> +#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
> +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
> +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
> +
> +void __dummy__(void)
> +{
> +    DEFINE(GPR_WIDTH, sizeof(unsigned long));
> +    DEFINE(FPR_WIDTH, sizeof(double));
> +
> +    OFFSET(UREGS_gprs, struct cpu_user_regs, gprs);
> +    OFFSET(UREGS_r0, struct cpu_user_regs, gprs[0]);
> +    OFFSET(UREGS_r1, struct cpu_user_regs, gprs[1]);
> +    OFFSET(UREGS_r13, struct cpu_user_regs, gprs[13]);
> +    OFFSET(UREGS_srr0, struct cpu_user_regs, srr0);
> +    OFFSET(UREGS_srr1, struct cpu_user_regs, srr1);
> +    OFFSET(UREGS_pc, struct cpu_user_regs, pc);
> +    OFFSET(UREGS_msr, struct cpu_user_regs, msr);
> +    OFFSET(UREGS_lr, struct cpu_user_regs, lr);
> +    OFFSET(UREGS_ctr, struct cpu_user_regs, ctr);
> +    OFFSET(UREGS_xer, struct cpu_user_regs, xer);
> +    OFFSET(UREGS_hid4, struct cpu_user_regs, hid4);
> +    OFFSET(UREGS_dar, struct cpu_user_regs, dar);
> +    OFFSET(UREGS_dsisr, struct cpu_user_regs, dsisr);
> +    OFFSET(UREGS_cr, struct cpu_user_regs, cr);
> +    OFFSET(UREGS_fpscr, struct cpu_user_regs, fpscr);
> +    DEFINE(UREGS_sizeof, sizeof(struct cpu_user_regs));
> +}
> +
> +/* TODO: Replace with BUILD_BUG_ON + IS_ALIGNED once we can use <xen/lib.h> */
> +_Static_assert(sizeof(struct cpu_user_regs) % STACK_ALIGN == 0,
> +               "struct cpu_user_regs not stack aligned!");

But patch 1 makes BUILD_BUG_ON() available now.
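
I.e. the TODO could be addressed right away, e.g. with something like this
inside __dummy__() (BUILD_BUG_ON() needs function scope; I'm assuming
IS_ALIGNED() is similarly usable here):

    BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct cpu_user_regs), STACK_ALIGN));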

> --- /dev/null
> +++ b/xen/arch/ppc/ppc64/of-call.S
> @@ -0,0 +1,83 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Adapted from Linux's arch/powerpc/kernel/entry_64.S, with the
> + * following copyright notice:
> + *
> + *  PowerPC version
> + *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
> + *  Rewritten by Cort Dougan ([email protected]) for PReP
> + *    Copyright (C) 1996 Cort Dougan <[email protected]>
> + *  Adapted for Power Macintosh by Paul Mackerras.
> + *  Low-level exception handlers and MMU support
> + *  rewritten by Paul Mackerras.
> + *    Copyright (C) 1996 Paul Mackerras.
> + *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
> + */
> +
> +#include <asm/asm-offsets.h>
> +#include <asm/asm-defns.h>
> +#include <asm/msr.h>
> +
> +/* size of minimum stack frame that can hold an entire cpu_user_regs struct */
> +#define STACK_SWITCH_FRAME_SIZE UREGS_sizeof
> +
> +    .section .init.text, "ax", @progbits
> +
> +ENTRY(enter_of)
> +    mflr %r0
> +    std %r0, 16(%r1)
> +    stdu %r1,-STACK_SWITCH_FRAME_SIZE(%r1) /* Save SP and create stack space */

Nit: A blank after the comma would again be nice.
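
I.e.:

    stdu %r1, -STACK_SWITCH_FRAME_SIZE(%r1) /* Save SP and create stack space */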

> +    /*
> +     * Because PROM is running in 32b mode, it clobbers the high order half
> +     * of all registers that it saves.  We therefore save those registers
> +     * PROM might touch to the stack.  (%r0, %r3-%r13 are caller saved)
> +     */
> +    SAVE_GPR(2, %r1)
> +    SAVE_GPR(13, %r1)
> +    SAVE_NVGPRS(%r1)
> +    mfcr %r10
> +    mfmsr %r11
> +    std %r10, UREGS_cr(%r1)
> +    std %r11, UREGS_msr(%r1)
> +
> +    /* Put PROM address in SRR0 */
> +    mtsrr0 %r4
> +
> +    /* Setup our trampoline return addr in LR */
> +    bcl 20, 31, .+4
> +0:  mflr %r4
> +    addi %r4, %r4, 1f - 0b
> +    mtlr %r4
> +
> +    /* Prepare a 32-bit mode big endian MSR */
> +    LOAD_IMM64(%r12, MSR_SF | MSR_LE)
> +    andc %r11, %r11, %r12
> +    mtsrr1 %r11
> +    rfid
> +
> +1:  /* Return from OF */
> +    FIXUP_ENDIAN
> +
> +    /* Just make sure that %r1 top 32 bits didn't get corrupted by OF */
> +    rldicl %r1, %r1, 0, 32
> +
> +    /* Restore the MSR (back to 64 bits) */
> +    ld %r0, UREGS_msr(%r1)
> +    mtmsrd %r0
> +    isync
> +
> +    /* Restore other registers */
> +    REST_GPR(2, %r1)
> +    REST_GPR(13, %r1)
> +    REST_NVGPRS(%r1)
> +    ld %r4, UREGS_cr(%r1)
> +    mtcr %r4
> +
> +    addi %r1, %r1, STACK_SWITCH_FRAME_SIZE
> +    ld %r0, 16(%r1)
> +    mtlr %r0
> +    blr
> +
> +    .size enter_of, . - enter_of
> +    .type enter_of, %function

Before you/we grow more assembly code, may I re-raise a request regarding
readability: I think it would be nice if operands started at a fixed column,
unless the insn mnemonic is unusually long. Where exactly to draw the line
is up to each architecture; on x86 we use 8 positions from the start of the
mnemonic.
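
E.g. (purely illustrative, taking the first few insns above with operands
starting 8 positions from the start of the mnemonic):

    mflr    %r0
    std     %r0, 16(%r1)
    stdu    %r1, -STACK_SWITCH_FRAME_SIZE(%r1)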

Jan
