On 06.08.2013, at 06:16, Paul Mackerras wrote:

> Currently PR-style KVM keeps the volatile guest register values
> (R0 - R13, CR, LR, CTR, XER, PC) in a shadow_vcpu struct rather than
> the main kvm_vcpu struct.  For 64-bit, the shadow_vcpu exists in two
> places, a kmalloc'd struct and in the PACA, and it gets copied back
> and forth in kvmppc_core_vcpu_load/put(), because the real-mode code
> can't rely on being able to access the kmalloc'd struct.
> 
> This changes the code to copy the volatile values into the shadow_vcpu
> as one of the last things done before entering the guest.  Similarly
> the values are copied back out of the shadow_vcpu to the kvm_vcpu
> immediately after exiting the guest.  We arrange for interrupts to be
> still disabled at this point so that we can't get preempted on 64-bit
> and end up copying values from the wrong PACA.
> 
> This means that the accessor functions in kvm_book3s.h for these
> registers are greatly simplified, and are the same between PR and HV KVM.
> In places where accesses to shadow_vcpu fields are now replaced by
> accesses to the kvm_vcpu, we can also remove the svcpu_get/put pairs.
> Finally, on 64-bit, we don't need the kmalloc'd struct at all any more.
> 
> With this, the time to read the PVR one million times in a loop went
> from 582.1ms to 584.3ms (averages of 10 values), a difference which is
> not statistically significant given the variability of the results
> (the standard deviations were 9.5ms and 8.6ms respectively).  A version
> of the patch that used loops to copy the GPR values increased that time
> by around 5% to 611.2ms, so the loop has been unrolled.
> 
> Signed-off-by: Paul Mackerras <pau...@samba.org>
> ---
> arch/powerpc/include/asm/kvm_book3s.h     | 220 +++++-------------------------
> arch/powerpc/include/asm/kvm_book3s_asm.h |   6 +-
> arch/powerpc/include/asm/kvm_host.h       |   1 +
> arch/powerpc/kernel/asm-offsets.c         |   4 +-
> arch/powerpc/kvm/book3s_emulate.c         |   8 +-
> arch/powerpc/kvm/book3s_interrupts.S      |  26 +++-
> arch/powerpc/kvm/book3s_pr.c              | 122 ++++++++++++-----
> arch/powerpc/kvm/book3s_rmhandlers.S      |   5 -
> arch/powerpc/kvm/trace.h                  |   7 +-
> 9 files changed, 156 insertions(+), 243 deletions(-)
> 
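
(For orientation, the flow described above reduces to roughly the following.
This is a pseudocode sketch only; the actual transitions are done in assembly
in book3s_interrupts.S and the real-mode handlers.)

	local_irq_disable();
	kvmppc_copy_to_svcpu(svcpu, vcpu);   /* volatile regs -> shadow_vcpu */
	/* ... enter the guest, run until the next exit to the host ... */
	kvmppc_copy_from_svcpu(vcpu, svcpu); /* IRQs still off, so we cannot
						be preempted onto another
						CPU's PACA in between */
	local_irq_enable();
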
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index fa19e2f..a8897c1 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -198,140 +198,76 @@ extern void kvm_return_point(void);
> #include <asm/kvm_book3s_64.h>
> #endif
> 
> -#ifdef CONFIG_KVM_BOOK3S_PR
> -
> -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
> -{
> -     return to_book3s(vcpu)->hior;
> -}
> -
> -static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
> -                     unsigned long pending_now, unsigned long old_pending)
> -{
> -     if (pending_now)
> -             vcpu->arch.shared->int_pending = 1;
> -     else if (old_pending)
> -             vcpu->arch.shared->int_pending = 0;
> -}
> -
> static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
> {
> -     if ( num < 14 ) {
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -             svcpu->gpr[num] = val;
> -             svcpu_put(svcpu);
> -             to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
> -     } else
> -             vcpu->arch.gpr[num] = val;
> +     vcpu->arch.gpr[num] = val;
> }
> 
> static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
> {
> -     if ( num < 14 ) {
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -             ulong r = svcpu->gpr[num];
> -             svcpu_put(svcpu);
> -             return r;
> -     } else
> -             return vcpu->arch.gpr[num];
> +     return vcpu->arch.gpr[num];
> }
> 
> static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     svcpu->cr = val;
> -     svcpu_put(svcpu);
> -     to_book3s(vcpu)->shadow_vcpu->cr = val;
> +     vcpu->arch.cr = val;
> }
> 
> static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     u32 r;
> -     r = svcpu->cr;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.cr;
> }
> 
> static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     svcpu->xer = val;
> -     to_book3s(vcpu)->shadow_vcpu->xer = val;
> -     svcpu_put(svcpu);
> +     vcpu->arch.xer = val;
> }
> 
> static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     u32 r;
> -     r = svcpu->xer;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.xer;
> }
> 
> static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     svcpu->ctr = val;
> -     svcpu_put(svcpu);
> +     vcpu->arch.ctr = val;
> }
> 
> static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     ulong r;
> -     r = svcpu->ctr;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.ctr;
> }
> 
> static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     svcpu->lr = val;
> -     svcpu_put(svcpu);
> +     vcpu->arch.lr = val;
> }
> 
> static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     ulong r;
> -     r = svcpu->lr;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.lr;
> }
> 
> static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     svcpu->pc = val;
> -     svcpu_put(svcpu);
> +     vcpu->arch.pc = val;
> }
> 
> static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     ulong r;
> -     r = svcpu->pc;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.pc;
> }
> 
> static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
> {
>       ulong pc = kvmppc_get_pc(vcpu);
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     u32 r;
> 
>       /* Load the instruction manually if it failed to do so in the
>        * exit path */
> -     if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
> -             kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
> +     if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> +             kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
> 
> -     r = svcpu->last_inst;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.last_inst;
> }
> 
> /*
> @@ -342,26 +278,34 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
> static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
> {
>       ulong pc = kvmppc_get_pc(vcpu) - 4;
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     u32 r;
> 
>       /* Load the instruction manually if it failed to do so in the
>        * exit path */
> -     if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
> -             kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
> +     if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> +             kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
> 
> -     r = svcpu->last_inst;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.last_inst;
> }
> 
> static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
> {
> -     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -     ulong r;
> -     r = svcpu->fault_dar;
> -     svcpu_put(svcpu);
> -     return r;
> +     return vcpu->arch.fault_dar;
> +}
> +
> +#ifdef CONFIG_KVM_BOOK3S_PR
> +
> +static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
> +{
> +     return to_book3s(vcpu)->hior;
> +}
> +
> +static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
> +                     unsigned long pending_now, unsigned long old_pending)
> +{
> +     if (pending_now)
> +             vcpu->arch.shared->int_pending = 1;
> +     else if (old_pending)
> +             vcpu->arch.shared->int_pending = 0;
> }
> 
> static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
> @@ -395,100 +339,6 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
> {
> }
> 
> -static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
> -{
> -     vcpu->arch.gpr[num] = val;
> -}
> -
> -static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
> -{
> -     return vcpu->arch.gpr[num];
> -}
> -
> -static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
> -{
> -     vcpu->arch.cr = val;
> -}
> -
> -static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.cr;
> -}
> -
> -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
> -{
> -     vcpu->arch.xer = val;
> -}
> -
> -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.xer;
> -}
> -
> -static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
> -{
> -     vcpu->arch.ctr = val;
> -}
> -
> -static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.ctr;
> -}
> -
> -static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
> -{
> -     vcpu->arch.lr = val;
> -}
> -
> -static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.lr;
> -}
> -
> -static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
> -{
> -     vcpu->arch.pc = val;
> -}
> -
> -static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.pc;
> -}
> -
> -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
> -{
> -     ulong pc = kvmppc_get_pc(vcpu);
> -
> -     /* Load the instruction manually if it failed to do so in the
> -      * exit path */
> -     if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> -             kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
> -
> -     return vcpu->arch.last_inst;
> -}
> -
> -/*
> - * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
> - * Because the sc instruction sets SRR0 to point to the following
> - * instruction, we have to fetch from pc - 4.
> - */
> -static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
> -{
> -     ulong pc = kvmppc_get_pc(vcpu) - 4;
> -
> -     /* Load the instruction manually if it failed to do so in the
> -      * exit path */
> -     if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> -             kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
> -
> -     return vcpu->arch.last_inst;
> -}
> -
> -static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
> -{
> -     return vcpu->arch.fault_dar;
> -}
> -
> static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
> {
>       return false;
> diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
> index 9039d3c..4141409 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_asm.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
> @@ -108,14 +108,14 @@ struct kvmppc_book3s_shadow_vcpu {
>       ulong gpr[14];
>       u32 cr;
>       u32 xer;
> -
> -     u32 fault_dsisr;
> -     u32 last_inst;
>       ulong ctr;
>       ulong lr;
>       ulong pc;
> +
>       ulong shadow_srr1;
>       ulong fault_dar;
> +     u32 fault_dsisr;
> +     u32 last_inst;
> 
> #ifdef CONFIG_PPC_BOOK3S_32
>       u32     sr[16];                 /* Guest SRs */
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 3328353..7b26395 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -463,6 +463,7 @@ struct kvm_vcpu_arch {
>       u32 ctrl;
>       ulong dabr;
>       ulong cfar;
> +     ulong shadow_srr1;
> #endif
>       u32 vrsave; /* also USPRG0 */
>       u32 mmucr;
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index a67c76e..14a8004 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -515,18 +515,18 @@ int main(void)
>       DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
>       DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
>       DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
> +     DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
>       DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
>       DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
>       DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
>       DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
> -     DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
> -                        offsetof(struct kvmppc_vcpu_book3s, vcpu));
>       DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
>       DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
>       DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
> 
> #ifdef CONFIG_PPC_BOOK3S_64
> #ifdef CONFIG_KVM_BOOK3S_PR
> +     DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
> # define SVCPU_FIELD(x, f)    DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
> #else
> # define SVCPU_FIELD(x, f)
> diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
> index 360ce68..34044b1 100644
> --- a/arch/powerpc/kvm/book3s_emulate.c
> +++ b/arch/powerpc/kvm/book3s_emulate.c
> @@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 
>                       r = kvmppc_st(vcpu, &addr, 32, zeros, true);
>                       if ((r == -ENOENT) || (r == -EPERM)) {
> -                             struct kvmppc_book3s_shadow_vcpu *svcpu;
> -
> -                             svcpu = svcpu_get(vcpu);
>                               *advance = 0;
>                               vcpu->arch.shared->dar = vaddr;
> -                             svcpu->fault_dar = vaddr;
> +                             vcpu->arch.fault_dar = vaddr;
> 
>                               dsisr = DSISR_ISSTORE;
>                               if (r == -ENOENT)
> @@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
>                                       dsisr |= DSISR_PROTFAULT;
> 
>                               vcpu->arch.shared->dsisr = dsisr;
> -                             svcpu->fault_dsisr = dsisr;
> -                             svcpu_put(svcpu);
> +                             vcpu->arch.fault_dsisr = dsisr;
> 
>                               kvmppc_book3s_queue_irqprio(vcpu,
>                                       BOOK3S_INTERRUPT_DATA_STORAGE);
> diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
> index 17cfae5..c81a185 100644
> --- a/arch/powerpc/kvm/book3s_interrupts.S
> +++ b/arch/powerpc/kvm/book3s_interrupts.S
> @@ -26,8 +26,12 @@
> 
> #if defined(CONFIG_PPC_BOOK3S_64)
> #define FUNC(name)            GLUE(.,name)
> +#define GET_SHADOW_VCPU(reg)    addi reg, r13, PACA_SVCPU
> +
> #elif defined(CONFIG_PPC_BOOK3S_32)
> #define FUNC(name)            name
> +#define GET_SHADOW_VCPU(reg) lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)
> +
> #endif /* CONFIG_PPC_BOOK3S_XX */
> 
> #define VCPU_LOAD_NVGPRS(vcpu) \
> @@ -87,8 +91,13 @@ kvm_start_entry:
>       VCPU_LOAD_NVGPRS(r4)
> 
> kvm_start_lightweight:
> +     /* Copy registers into shadow vcpu so we can access them in real mode */
> +     GET_SHADOW_VCPU(r3)
> +     bl      FUNC(kvmppc_copy_to_svcpu)

This will clobber r3 and r4, no? We need to restore them from the stack here, I
would think.
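
Something along these lines, perhaps (an untested sketch; it assumes the vcpu
pointer is still saved at GPR4(r1), the same stack slot the exit path below
reloads from):

	GET_SHADOW_VCPU(r3)
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
	/* r3/r4 are volatile across the call; reload the vcpu pointer */
	PPC_LL	r4, GPR4(r1)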

> +     nop
> 
> #ifdef CONFIG_PPC_BOOK3S_64
> +     /* Get the dcbz32 flag */
>       PPC_LL  r3, VCPU_HFLAGS(r4)
>       rldicl  r3, r3, 0, 63           /* r3 &= 1 */
>       stb     r3, HSTATE_RESTORE_HID5(r13)
> @@ -125,8 +134,17 @@ kvmppc_handler_highmem:
>        *
>        */
> 
> -     /* R7 = vcpu */
> -     PPC_LL  r7, GPR4(r1)
> +     /* Transfer reg values from shadow vcpu back to vcpu struct */
> +     /* On 64-bit, interrupts are still off at this point */
> +     PPC_LL  r3, GPR4(r1)            /* vcpu pointer */
> +     GET_SHADOW_VCPU(r4)
> +     bl      FUNC(kvmppc_copy_from_svcpu)
> +     nop
> +
> +     /* Re-enable interrupts */
> +     mfmsr   r3
> +     ori     r3, r3, MSR_EE
> +     MTMSR_EERI(r3)
> 
> #ifdef CONFIG_PPC_BOOK3S_64
>       /*
> @@ -135,8 +153,12 @@ kvmppc_handler_highmem:
>        */
>       ld      r3, PACA_SPRG3(r13)
>       mtspr   SPRN_SPRG3, r3
> +
> #endif /* CONFIG_PPC_BOOK3S_64 */
> 
> +     /* R7 = vcpu */
> +     PPC_LL  r7, GPR4(r1)
> +
>       PPC_STL r14, VCPU_GPR(R14)(r7)
>       PPC_STL r15, VCPU_GPR(R15)(r7)
>       PPC_STL r16, VCPU_GPR(R16)(r7)
> diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
> index 6cb29ef..28146c1 100644
> --- a/arch/powerpc/kvm/book3s_pr.c
> +++ b/arch/powerpc/kvm/book3s_pr.c
> @@ -61,8 +61,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> #ifdef CONFIG_PPC_BOOK3S_64
>       struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
>       memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
> -     memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
> -            sizeof(get_paca()->shadow_vcpu));
>       svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
>       svcpu_put(svcpu);
> #endif
> @@ -77,8 +75,6 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
> #ifdef CONFIG_PPC_BOOK3S_64
>       struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
>       memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
> -     memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
> -            sizeof(get_paca()->shadow_vcpu));
>       to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
>       svcpu_put(svcpu);
> #endif
> @@ -87,6 +83,60 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
>       vcpu->cpu = -1;
> }
> 
> +/* Copy data needed by real-mode code from vcpu to shadow vcpu */
> +void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
> +                       struct kvm_vcpu *vcpu)
> +{
> +     svcpu->gpr[0] = vcpu->arch.gpr[0];
> +     svcpu->gpr[1] = vcpu->arch.gpr[1];
> +     svcpu->gpr[2] = vcpu->arch.gpr[2];
> +     svcpu->gpr[3] = vcpu->arch.gpr[3];
> +     svcpu->gpr[4] = vcpu->arch.gpr[4];
> +     svcpu->gpr[5] = vcpu->arch.gpr[5];
> +     svcpu->gpr[6] = vcpu->arch.gpr[6];
> +     svcpu->gpr[7] = vcpu->arch.gpr[7];
> +     svcpu->gpr[8] = vcpu->arch.gpr[8];
> +     svcpu->gpr[9] = vcpu->arch.gpr[9];
> +     svcpu->gpr[10] = vcpu->arch.gpr[10];
> +     svcpu->gpr[11] = vcpu->arch.gpr[11];
> +     svcpu->gpr[12] = vcpu->arch.gpr[12];
> +     svcpu->gpr[13] = vcpu->arch.gpr[13];
> +     svcpu->cr  = vcpu->arch.cr;
> +     svcpu->xer = vcpu->arch.xer;
> +     svcpu->ctr = vcpu->arch.ctr;
> +     svcpu->lr  = vcpu->arch.lr;
> +     svcpu->pc  = vcpu->arch.pc;
> +}
> +
> +/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
> +void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
> +                         struct kvmppc_book3s_shadow_vcpu *svcpu)
> +{
> +     vcpu->arch.gpr[0] = svcpu->gpr[0];
> +     vcpu->arch.gpr[1] = svcpu->gpr[1];
> +     vcpu->arch.gpr[2] = svcpu->gpr[2];
> +     vcpu->arch.gpr[3] = svcpu->gpr[3];
> +     vcpu->arch.gpr[4] = svcpu->gpr[4];
> +     vcpu->arch.gpr[5] = svcpu->gpr[5];
> +     vcpu->arch.gpr[6] = svcpu->gpr[6];
> +     vcpu->arch.gpr[7] = svcpu->gpr[7];
> +     vcpu->arch.gpr[8] = svcpu->gpr[8];
> +     vcpu->arch.gpr[9] = svcpu->gpr[9];
> +     vcpu->arch.gpr[10] = svcpu->gpr[10];
> +     vcpu->arch.gpr[11] = svcpu->gpr[11];
> +     vcpu->arch.gpr[12] = svcpu->gpr[12];
> +     vcpu->arch.gpr[13] = svcpu->gpr[13];
> +     vcpu->arch.cr  = svcpu->cr;
> +     vcpu->arch.xer = svcpu->xer;
> +     vcpu->arch.ctr = svcpu->ctr;
> +     vcpu->arch.lr  = svcpu->lr;
> +     vcpu->arch.pc  = svcpu->pc;
> +     vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
> +     vcpu->arch.fault_dar   = svcpu->fault_dar;
> +     vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
> +     vcpu->arch.last_inst   = svcpu->last_inst;
> +}
> +
> int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
> {
>       int r = 1; /* Indicate we want to get back into the guest */
> @@ -388,22 +438,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 
>       if (page_found == -ENOENT) {
>               /* Page not found in guest PTE entries */
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
>               vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
> -             vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
> +             vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
>               vcpu->arch.shared->msr |=
> -                     (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
> -             svcpu_put(svcpu);
> +                     vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
>               kvmppc_book3s_queue_irqprio(vcpu, vec);
>       } else if (page_found == -EPERM) {
>               /* Storage protection */
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
>               vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
> -             vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
> +             vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
>               vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
>               vcpu->arch.shared->msr |=
> -                     svcpu->shadow_srr1 & 0x00000000f8000000ULL;
> -             svcpu_put(svcpu);
> +                     vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
>               kvmppc_book3s_queue_irqprio(vcpu, vec);
>       } else if (page_found == -EINVAL) {
>               /* Page not found in guest SLB */
> @@ -643,21 +689,26 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>       switch (exit_nr) {
>       case BOOK3S_INTERRUPT_INST_STORAGE:
>       {
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -             ulong shadow_srr1 = svcpu->shadow_srr1;
> +             ulong shadow_srr1 = vcpu->arch.shadow_srr1;
>               vcpu->stat.pf_instruc++;
> 
> #ifdef CONFIG_PPC_BOOK3S_32
>               /* We set segments as unused segments when invalidating them. So
>                * treat the respective fault as segment fault. */
> -             if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
> -                     kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
> -                     r = RESUME_GUEST;
> +             {
> +                     struct kvmppc_book3s_shadow_vcpu *svcpu;
> +                     u32 sr;
> +
> +                     svcpu = svcpu_get(vcpu);
> +                     sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];

Doesn't this break two concurrently running guests now that we don't copy the 
shadow vcpu anymore? Just move the sr array to a kmalloc'ed area until the 
whole vcpu is kmalloc'ed. Then you can get rid of all shadow vcpu code.
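
Roughly like so, say (a sketch only; shadow_sr is a hypothetical field name):

	/* in struct kvmppc_vcpu_book3s, replacing the full shadow_vcpu: */
	#ifdef CONFIG_PPC_BOOK3S_32
		u32 *shadow_sr;		/* per-vcpu shadow of the 16 SRs */
	#endif

	/* in kvmppc_core_vcpu_create(): */
	vcpu_book3s->shadow_sr = kzalloc(16 * sizeof(u32), GFP_KERNEL);
	if (!vcpu_book3s->shadow_sr)
		goto free_vcpu;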

>                       svcpu_put(svcpu);
> -                     break;
> +                     if (sr == SR_INVALID) {
> +                             kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
> +                             r = RESUME_GUEST;
> +                             break;
> +                     }
>               }
> #endif
> -             svcpu_put(svcpu);
> 
>               /* only care about PTEG not found errors, but leave NX alone */
>               if (shadow_srr1 & 0x40000000) {
> @@ -682,21 +733,26 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>       case BOOK3S_INTERRUPT_DATA_STORAGE:
>       {
>               ulong dar = kvmppc_get_fault_dar(vcpu);
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -             u32 fault_dsisr = svcpu->fault_dsisr;
> +             u32 fault_dsisr = vcpu->arch.fault_dsisr;
>               vcpu->stat.pf_storage++;
> 
> #ifdef CONFIG_PPC_BOOK3S_32
>               /* We set segments as unused segments when invalidating them. So
>                * treat the respective fault as segment fault. */
> -             if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
> -                     kvmppc_mmu_map_segment(vcpu, dar);
> -                     r = RESUME_GUEST;
> +             {
> +                     struct kvmppc_book3s_shadow_vcpu *svcpu;
> +                     u32 sr;
> +
> +                     svcpu = svcpu_get(vcpu);
> +                     sr = svcpu->sr[dar >> SID_SHIFT];
>                       svcpu_put(svcpu);
> -                     break;
> +                     if (sr == SR_INVALID) {
> +                             kvmppc_mmu_map_segment(vcpu, dar);
> +                             r = RESUME_GUEST;
> +                             break;
> +                     }
>               }
> #endif
> -             svcpu_put(svcpu);
> 
>               /* The only case we need to handle is missing shadow PTEs */
>               if (fault_dsisr & DSISR_NOHPTE) {
> @@ -743,13 +799,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>       case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
>       {
>               enum emulation_result er;
> -             struct kvmppc_book3s_shadow_vcpu *svcpu;
>               ulong flags;
> 
> program_interrupt:
> -             svcpu = svcpu_get(vcpu);
> -             flags = svcpu->shadow_srr1 & 0x1f0000ull;
> -             svcpu_put(svcpu);
> +             flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
> 
>               if (vcpu->arch.shared->msr & MSR_PR) {
> #ifdef EXIT_DEBUG
> @@ -881,9 +934,7 @@ program_interrupt:
>               break;
>       default:
>       {
> -             struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
> -             ulong shadow_srr1 = svcpu->shadow_srr1;
> -             svcpu_put(svcpu);
> +             ulong shadow_srr1 = vcpu->arch.shadow_srr1;
>               /* Ugh - bork here! What did we get? */
>               printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
>                       exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
> @@ -1058,11 +1109,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
>       if (!vcpu_book3s)
>               goto out;
> 
> +#ifdef CONFIG_KVM_BOOK3S_32
>       vcpu_book3s->shadow_vcpu =
>               kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
>       if (!vcpu_book3s->shadow_vcpu)
>               goto free_vcpu;
> -
> +#endif
>       vcpu = &vcpu_book3s->vcpu;
>       err = kvm_vcpu_init(vcpu, kvm, id);
>       if (err)
> @@ -1095,8 +1147,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
> uninit_vcpu:
>       kvm_vcpu_uninit(vcpu);
> free_shadow_vcpu:
> +#ifdef CONFIG_KVM_BOOK3S_32
>       kfree(vcpu_book3s->shadow_vcpu);
> free_vcpu:
> +#endif
>       vfree(vcpu_book3s);
> out:
>       return ERR_PTR(err);
> diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
> index 8f7633e..b64d7f9 100644
> --- a/arch/powerpc/kvm/book3s_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_rmhandlers.S
> @@ -179,11 +179,6 @@ _GLOBAL(kvmppc_entry_trampoline)
> 
>       li      r6, MSR_IR | MSR_DR
>       andc    r6, r5, r6      /* Clear DR and IR in MSR value */
> -     /*
> -      * Set EE in HOST_MSR so that it's enabled when we get into our
> -      * C exit handler function
> -      */
> -     ori     r5, r5, MSR_EE

This looks like an unrelated change?


Alex

>       mtsrr0  r7
>       mtsrr1  r6
>       RFI
> diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
> index e326489..a088e9a 100644
> --- a/arch/powerpc/kvm/trace.h
> +++ b/arch/powerpc/kvm/trace.h
> @@ -101,17 +101,12 @@ TRACE_EVENT(kvm_exit,
>       ),
> 
>       TP_fast_assign(
> -#ifdef CONFIG_KVM_BOOK3S_PR
> -             struct kvmppc_book3s_shadow_vcpu *svcpu;
> -#endif
>               __entry->exit_nr        = exit_nr;
>               __entry->pc             = kvmppc_get_pc(vcpu);
>               __entry->dar            = kvmppc_get_fault_dar(vcpu);
>               __entry->msr            = vcpu->arch.shared->msr;
> #ifdef CONFIG_KVM_BOOK3S_PR
> -             svcpu = svcpu_get(vcpu);
> -             __entry->srr1           = svcpu->shadow_srr1;
> -             svcpu_put(svcpu);
> +             __entry->srr1           = vcpu->arch.shadow_srr1;
> #endif
>               __entry->last_inst      = vcpu->arch.last_inst;
>       ),
> -- 
> 1.8.3.1
> 
