On 25 November 2015 at 00:37, Andrew Jones <drjo...@redhat.com> wrote:
> Add the support needed for creating prstatus elf notes. This
> allows us to use QMP dump-guest-memory.
> +
> +    if (is_a64(env)) {
> +        for (i = 0; i < 31; ++i) {
> +            note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
> +        }
> +        note.prstatus.pr_reg.sp = cpu_to_dump64(s, env->xregs[31]);
> +        note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
> +        note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate_read(env));
> +    } else {
> +        aarch64_sync_64_to_32(env);
> +        for (i = 0; i < 16; ++i) {
> +            note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->regs[i]);
> +        }
> +        note.prstatus.pr_reg.sp = note.prstatus.pr_reg.regs[13];
> +        note.prstatus.pr_reg.pc = note.prstatus.pr_reg.regs[15];
> +        note.prstatus.pr_reg.pstate = cpu_to_dump64(s, cpsr_read(env));
> +    }

This doesn't look right. sync_64_to_32 is copying the state held in
the 64-bit env->xregs etc into the 32-bit env->regs. But if we're in
32-bit state then the true state is in the 32-bit fields, and this
will trash it. You want to sync_32_to_64 here, and then the code to
write the values to the dump is the same either way (except for
pstate vs cpsr, which we haven't managed to clean up and unify yet,
sadly).

I think you want:

    uint64_t pstate;
    [...]

    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
        pstate = cpsr_read(env);
    } else {
        pstate = pstate_read(env);
    }

    for (i = 0; i < 31; ++i) {
        note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
    }
    note.prstatus.pr_reg.sp = cpu_to_dump64(s, env->xregs[31]);
    note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
    note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);

(Note that the 32-bit SP is not architecturally in X31; it's in one
of the other xregs, depending what mode the CPU was in. For 32-bit
userspace that will be USR, and it's in X13.)
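Concretely: with the CPU in USR mode, the architectural AArch32 to
AArch64 mapping is just an identity copy of the low registers, which
is why SP_usr ends up in X13. A rough sketch of that slice of what
aarch64_sync_32_to_64() does (illustrative only; the real helper
also has to copy the banked SP/LR of the other AArch32 modes into
the higher xregs):

    /* Illustrative sketch, assuming the CPU is in USR mode:
     * r0-r12 -> x0-x12, SP_usr -> x13, LR_usr -> x14.
     */
    for (i = 0; i < 15; i++) {
        env->xregs[i] = env->regs[i];
    }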
> +
> +    ret = f(&note, sizeof(note), s);
> +    if (ret < 0) {
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +/* struct pt_regs from arch/arm/include/asm/ptrace.h */
> +struct arm_user_regs {
> +    uint32_t regs[17];
> +    char pad[4];
> +} QEMU_PACKED;
> +
> +QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);
> +
> +/* struct elf_prstatus from include/uapi/linux/elfcore.h */
> +struct arm_elf_prstatus {
> +    char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
> +    uint32_t pr_pid;
> +    char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
> +                      offsetof(struct elf_prstatus, pr_ppid) */
> +    struct arm_user_regs pr_reg;
> +    int pr_fpvalid;
> +} QEMU_PACKED arm_elf_prstatus;
> +
> +QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);
> +
> +struct arm_note {
> +    Elf32_Nhdr hdr;
> +    char name[QEMU_ALIGN_UP(NOTE_NAMESZ, 4)];
> +    struct arm_elf_prstatus prstatus;
> +} QEMU_PACKED;
> +
> +QEMU_BUILD_BUG_ON(sizeof(struct arm_note) != 168);
> +
> +static int
> +arm_write_elf32_note(WriteCoreDumpFunction f, CPUARMState *env,
> +                     int id, DumpState *s)
> +{
> +    struct arm_note note;
> +    int ret, i;
> +
> +    memset(&note, 0, sizeof(note));
> +
> +    note.hdr.n_namesz = cpu_to_dump32(s, NOTE_NAMESZ);
> +    note.hdr.n_descsz = cpu_to_dump32(s, sizeof(note.prstatus));
> +    note.hdr.n_type = cpu_to_dump32(s, NT_PRSTATUS);
> +
> +    memcpy(note.name, NOTE_NAME, NOTE_NAMESZ);
> +    note.prstatus.pr_pid = cpu_to_dump32(s, id);
> +
> +    for (i = 0; i < 16; ++i) {
> +        note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
> +    }
> +    note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));
> +
> +    ret = f(&note, sizeof(note), s);
> +    if (ret < 0) {
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
> +                             int cpuid, void *opaque)
> +{
> +    CPUARMState *env = &ARM_CPU(cs)->env;
> +    int ret;
> +
> +    if (arm_el_is_aa64(env, 1)) {
> +        ret = aarch64_write_elf64_note(f, env, cpuid, opaque);
> +    } else {
> +        ret = arm_write_elf32_note(f, env, cpuid, opaque);
> +    }

This might produce the wrong kind of dump if we're in EL2 or EL3 at
the point we take it (can only happen in emulation, and only once we
add EL2 and EL3 emulation support, which isn't active yet). Do we
care?
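If we decide we do care, one possible shape (purely illustrative and
untested; it assumes that keying the note format off the register
width of the EL we're actually running at is the behaviour we want)
would be:

    /* Illustrative sketch only: pick the note format from the
     * current exception level rather than assuming EL1.
     */
    int cur_el = arm_current_el(env);

    if (arm_el_is_aa64(env, cur_el)) {
        ret = aarch64_write_elf64_note(f, env, cpuid, opaque);
    } else {
        ret = arm_write_elf32_note(f, env, cpuid, opaque);
    }

thanks
-- PMM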