From: Christian Ehrhardt <[EMAIL PROTECTED]>

Other existing kvm stats are either just counters (kvm_stat) reported for kvm
generally, or trace-based approaches like kvm_trace.
For kvm on powerpc we had the need to track the timings of the different exit
types. While this could be achieved by parsing data created with a kvm_trace
extension, this adds too much overhead (at least on embedded powerpc), slowing
down the workloads we wanted to measure.

Therefore this patch adds an in-kernel exit timing statistic to the powerpc kvm
code. This statistic is available per VM and VCPU under the kvm debugfs directory.
As this statistic adds a small but non-zero overhead, it can be enabled via a
.config entry and is off by default.

Since this patch touched all powerpc kvm_stat code anyway, this code is now
merged and simplified together with the exit timing statistic code (still
working with exit timing disabled in .config).

Here is a sample output (after postprocessing with the awk script I'll post in
reply to this patch) showing what the results look like.
sum of time 27504898
        MMIO: count        824 min         51 max        555 sum                
75825 sum_quad              9232871 avg          92.0206 stddev       52.318 %  
0.28
         DCR: count        140 min         44 max         92 sum                
 6746 sum_quad               327658 avg          48.1857 stddev        4.307 %  
0.02
      SIGNAL: count          2 min        309 max        993 sum                
 1302 sum_quad              1081530 avg         651.0000 stddev      342.000 %  
0.00
    ITLBREAL: count        293 min         11 max         14 sum                
 3515 sum_quad                42175 avg          11.9966 stddev        0.155 %  
0.01
    ITLBVIRT: count     113822 min         20 max        338 sum              
2595412 sum_quad             60256824 avg          22.8024 stddev        3.074 
%  9.44
    DTLBREAL: count        242 min         11 max         14 sum                
 2908 sum_quad                34974 avg          12.0165 stddev        0.352 %  
0.01
    DTLBVIRT: count      66687 min         21 max        329 sum              
1530048 sum_quad             35434926 avg          22.9437 stddev        2.224 
%  5.56
     SYSCALL: count         72 min          9 max         10 sum                
  649 sum_quad                 5851 avg           9.0139 stddev        0.117 %  
0.00
         ISI: count         56 min          9 max         10 sum                
  506 sum_quad                 4574 avg           9.0357 stddev        0.186 %  
0.00
         DSI: count         49 min          9 max         10 sum                
  448 sum_quad                 4102 avg           9.1429 stddev        0.350 %  
0.00
    EMULINST: count     211220 min          7 max       7700 sum              
3292984 sum_quad           5797023806 avg          15.5903 stddev      164.931 
% 11.97
         DEC: count       6582 min         49 max        322 sum               
367567 sum_quad             22996737 avg          55.8443 stddev       19.373 % 
 1.34
      EXTINT: count          4 min         79 max        513 sum                
  797 sum_quad               290423 avg         199.2500 stddev      181.398 %  
0.00
 TIMEINGUEST: count     399993 min          0 max       3952 sum             
19626191 sum_quad          61148587807 avg          49.0663 stddev      387.900 
% 71.36

Signed-off-by: Christian Ehrhardt <[EMAIL PROTECTED]>
---

[diffstat]
 arch/powerpc/include/asm/kvm_host.h         |   49 ++++++++
 arch/powerpc/include/asm/kvm_timing_stats.h |  142 ++++++++++++++++++++++++
 arch/powerpc/include/asm/mmu-44x.h          |    1
 arch/powerpc/kernel/asm-offsets.c           |   11 +
 arch/powerpc/kvm/Kconfig                    |    9 +
 arch/powerpc/kvm/booke.c                    |   35 +++---
 arch/powerpc/kvm/booke.h                    |    7 +
 arch/powerpc/kvm/booke_interrupts.S         |   24 ++++
 arch/powerpc/kvm/powerpc.c                  |  163 +++++++++++++++++++++++++++-
 9 files changed, 425 insertions(+), 16 deletions(-)

[diff]

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -71,7 +71,43 @@
        u32 word2;
 };
 
+enum kvm_exit_types {
+       MMIO_EXITS,
+       DCR_EXITS,
+       SIGNAL_EXITS,
+       ITLB_REAL_MISS_EXITS,
+       ITLB_VIRT_MISS_EXITS,
+       DTLB_REAL_MISS_EXITS,
+       DTLB_VIRT_MISS_EXITS,
+       SYSCALL_EXITS,
+       ISI_EXITS,
+       DSI_EXITS,
+       EMULATED_INST_EXITS,
+       EMULATED_MTMSRWE_EXITS,
+       DEC_EXITS,
+       EXT_INTR_EXITS,
+       HALT_WAKEUP,
+       USR_PR_INST,
+       FP_UNAVAIL,
+       DEBUG_EXITS,
+       TIMEINGUEST,
+       __NUMBER_OF_KVM_EXIT_TYPES
+};
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+/* allow access to big endian 32bit upper/lower parts and 64bit var */
+typedef union {
+       u64 tv64;
+       struct {
+               u32 tbu, tbl;
+       } tv32;
+} exit_timing_t;
+#endif
+
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       unsigned int vm_id;
+#endif
 };
 
 struct kvm_vcpu_arch {
@@ -130,6 +166,19 @@
        u32 dbcr0;
        u32 dbcr1;
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       exit_timing_t timing_exit;
+       exit_timing_t timing_last_enter;
+       u32 last_exit_type;
+       u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+       u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+       u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+       u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+       u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+       u64 timing_last_exit;
+       struct dentry *debugfs_exit_timing;
+#endif
+
        u32 last_inst;
        ulong fault_dear;
        ulong fault_esr;
diff --git a/arch/powerpc/include/asm/kvm_timing_stats.h 
b/arch/powerpc/include/asm/kvm_timing_stats.h
new file mode 100644
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_timing_stats.h
@@ -0,0 +1,168 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Christian Ehrhardt <[EMAIL PROTECTED]>
+ */
+
+#ifndef __POWERPC_KVM_EXITTIMING_H__
+#define __POWERPC_KVM_EXITTIMING_H__
+
+#include <linux/kvm_host.h>
+#include <asm/time.h>
+#include <asm-generic/div64.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+static inline void init_timing_stats(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       /* pause guest: take the (sleeping) mutex BEFORE disabling irqs */
+       mutex_lock(&vcpu->mutex);
+       local_irq_disable();
+
+       vcpu->arch.last_exit_type = 0xDEAD;
+       for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+               vcpu->arch.timing_count_type[i] = 0;
+               vcpu->arch.timing_max_duration[i] = 0;
+               vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
+               vcpu->arch.timing_sum_duration[i] = 0;
+               vcpu->arch.timing_sum_quad_duration[i] = 0;
+       }
+       vcpu->arch.timing_last_exit = 0;
+       vcpu->arch.timing_exit.tv64 = 0;
+       vcpu->arch.timing_last_enter.tv64 = 0;
+
+       local_irq_enable();
+       mutex_unlock(&vcpu->mutex);
+}
+
+static inline void add_exit_timing(struct kvm_vcpu *vcpu,
+                                       u64 duration, int type)
+{
+       u64 old;
+
+       do_div(duration, tb_ticks_per_usec);
+       if (unlikely(duration > 0xFFFFFFFF)) {
+               printk(KERN_ERR"%s - duration too big -> overflow"
+                       " duration %lld type %d exit #%d\n",
+                       __func__, duration, type,
+                       vcpu->arch.timing_count_type[type]);
+               return;
+       }
+
+       vcpu->arch.timing_count_type[type]++;
+
+       /* sum */
+       old = vcpu->arch.timing_sum_duration[type];
+       vcpu->arch.timing_sum_duration[type] += duration;
+       if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
+               printk(KERN_ERR"%s - wrap adding sum of durations"
+                       " old %lld new %lld type %d exit # of type %d\n",
+                       __func__, old, vcpu->arch.timing_sum_duration[type],
+                       type, vcpu->arch.timing_count_type[type]);
+       }
+
+       /* square sum */
+       old = vcpu->arch.timing_sum_quad_duration[type];
+       vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
+       if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
+               printk(KERN_ERR"%s - wrap adding sum of squared durations"
+                       " old %lld new %lld type %d exit # of type %d\n",
+                       __func__, old,
+                       vcpu->arch.timing_sum_quad_duration[type],
+                       type, vcpu->arch.timing_count_type[type]);
+       }
+
+       /* set min/max */
+       if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
+               vcpu->arch.timing_min_duration[type] = duration;
+       if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
+               vcpu->arch.timing_max_duration[type] = duration;
+}
+
+static inline void update_timing_stats(struct kvm_vcpu *vcpu)
+{
+       u64 exit = vcpu->arch.timing_last_exit;
+       u64 enter = vcpu->arch.timing_last_enter.tv64;
+
+       /* save exit time, used next exit when the reenter time is known */
+       vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
+
+       if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
+               return; /* skip incomplete cycle (e.g. after reset) */
+
+       /* update statistics for average and standard deviation */
+       add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
+       /* enter -> timing_last_exit is time spent in guest - log this too */
+       add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
+                       TIMEINGUEST);
+}
+#else
+#define init_timing_stats(x) do { } while (0)
+#define update_timing_stats(x) do { } while (0)
+#endif /* CONFIG_KVM_BOOKE_EXIT_TIMING */
+
+static inline void account_exit(struct kvm_vcpu *vcpu, int type)
+{
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       vcpu->arch.last_exit_type = type;
+#endif
+       /* type is usually known at build time */
+       switch (type) {
+       case EXT_INTR_EXITS:
+               vcpu->stat.ext_intr_exits++;
+               break;
+       case DEC_EXITS:
+               vcpu->stat.dec_exits++;
+               break;
+       case EMULATED_INST_EXITS:
+               vcpu->stat.emulated_inst_exits++;
+               break;
+       case DCR_EXITS:
+               vcpu->stat.dcr_exits++;
+               break;
+       case DSI_EXITS:
+               vcpu->stat.dsi_exits++;
+               break;
+       case ISI_EXITS:
+               vcpu->stat.isi_exits++;
+               break;
+       case SYSCALL_EXITS:
+               vcpu->stat.syscall_exits++;
+               break;
+       case DTLB_REAL_MISS_EXITS:
+               vcpu->stat.dtlb_real_miss_exits++;
+               break;
+       case DTLB_VIRT_MISS_EXITS:
+               vcpu->stat.dtlb_virt_miss_exits++;
+               break;
+       case MMIO_EXITS:
+               vcpu->stat.mmio_exits++;
+               break;
+       case ITLB_REAL_MISS_EXITS:
+               vcpu->stat.itlb_real_miss_exits++;
+               break;
+       case ITLB_VIRT_MISS_EXITS:
+               vcpu->stat.itlb_virt_miss_exits++;
+               break;
+       case SIGNAL_EXITS:
+               vcpu->stat.signal_exits++;
+               break;
+       }
+}
+
+#endif /* __POWERPC_KVM_EXITTIMING_H__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h 
b/arch/powerpc/include/asm/mmu-44x.h
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -56,6 +56,7 @@
 #ifndef __ASSEMBLY__
 
 extern unsigned int tlb_44x_hwater;
+extern unsigned int tlb_44x_index;
 
 typedef struct {
        unsigned long id;
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -387,5 +387,16 @@
        DEFINE(PTE_SHIFT, PTE_SHIFT);
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
+                                               arch.timing_exit.tv32.tbu));
+       DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
+                                               arch.timing_exit.tv32.tbl));
+       DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
+                                       arch.timing_last_enter.tv32.tbu));
+       DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
+                                       arch.timing_last_enter.tv32.tbl));
+#endif
+
        return 0;
 }
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -34,6 +34,15 @@
        ---help---
          KVM can run unmodified 440 guest kernels on 440 host processors.
 
+config KVM_BOOKE_EXIT_TIMING
+       bool "Trace detailed exit Timing"
+       depends on KVM && 44x
+       ---help---
+         Inserts code to trace timestamps for every exit/enter cycle. A per 
vcpu
+         report is available in debugfs kvm/VM_###/VPCU_###_exit_timing.
+         The overhead is relatively small, however it is not recommended for
+         production environments.
+
 config KVM_TRACE
        bool "KVM trace support"
        depends on KVM && MARKERS && SYSFS
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_timing_stats.h>
 #include <asm/cacheflush.h>
 
 #include "booke.h"
@@ -183,6 +184,9 @@
        enum emulation_result er;
        int r = RESUME_HOST;
 
+       /* update before a new last_exit_type is rewritten */
+       update_timing_stats(vcpu);
+
        local_irq_enable();
 
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -196,7 +200,7 @@
                break;
 
        case BOOKE_INTERRUPT_EXTERNAL:
-               vcpu->stat.ext_intr_exits++;
+               account_exit(vcpu, EXT_INTR_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
@@ -211,7 +215,7 @@
                 * we do reschedule the host will fault over it. Perhaps we
                 * should politely restore the host's entries to minimize
                 * misses before ceding control. */
-               vcpu->stat.dec_exits++;
+               account_exit(vcpu, DEC_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
@@ -224,6 +228,7 @@
                        vcpu->arch.esr = vcpu->arch.fault_esr;
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
                        r = RESUME_GUEST;
+                       account_exit(vcpu, USR_PR_INST);
                        break;
                }
 
@@ -232,12 +237,12 @@
                case EMULATE_DONE:
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
-                       vcpu->stat.emulated_inst_exits++;
+                       account_exit(vcpu, EMULATED_INST_EXITS);
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
-                       vcpu->stat.dcr_exits++;
+                       account_exit(vcpu, DCR_EXITS);
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
@@ -257,6 +262,7 @@
 
        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+               account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;
 
@@ -264,20 +270,20 @@
                vcpu->arch.dear = vcpu->arch.fault_dear;
                vcpu->arch.esr = vcpu->arch.fault_esr;
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
-               vcpu->stat.dsi_exits++;
+               account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;
 
        case BOOKE_INTERRUPT_INST_STORAGE:
                vcpu->arch.esr = vcpu->arch.fault_esr;
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
-               vcpu->stat.isi_exits++;
+               account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;
 
        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
-               vcpu->stat.syscall_exits++;
+               account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
 
@@ -293,7 +299,7 @@
                        kvmppc_booke_queue_irqprio(vcpu, 
BOOKE_IRQPRIO_DTLB_MISS);
                        vcpu->arch.dear = vcpu->arch.fault_dear;
                        vcpu->arch.esr = vcpu->arch.fault_esr;
-                       vcpu->stat.dtlb_real_miss_exits++;
+                       account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }
@@ -310,13 +316,13 @@
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
                                       gtlbe->word2);
-                       vcpu->stat.dtlb_virt_miss_exits++;
+                       account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        r = kvmppc_emulate_mmio(run, vcpu);
-                       vcpu->stat.mmio_exits++;
+                       account_exit(vcpu, MMIO_EXITS);
                }
 
                break;
@@ -334,11 +340,11 @@
                if (!gtlbe) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, 
BOOKE_IRQPRIO_ITLB_MISS);
-                       vcpu->stat.itlb_real_miss_exits++;
+                       account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }
 
-               vcpu->stat.itlb_virt_miss_exits++;
+               account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
                gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
 
@@ -370,6 +376,7 @@
                mtspr(SPRN_DBSR, dbsr);
 
                run->exit_reason = KVM_EXIT_DEBUG;
+               account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }
@@ -390,7 +397,7 @@
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-                       vcpu->stat.signal_exits++;
+                       account_exit(vcpu, SIGNAL_EXITS);
                }
        }
 
@@ -409,6 +416,8 @@
        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;
+
+       init_timing_stats(vcpu);
 
        return kvmppc_core_vcpu_setup(vcpu);
 }
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -22,6 +22,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
+#include <asm/kvm_timing_stats.h>
 
 /* interrupt priortity ordering */
 #define BOOKE_IRQPRIO_DATA_STORAGE 0
@@ -50,8 +51,12 @@
 
        vcpu->arch.msr = new_msr;
 
-       if (vcpu->arch.msr & MSR_WE)
+       if (vcpu->arch.msr & MSR_WE) {
+               /* whatever exit it was before, this would clobber its timings
+                * count MSR_WE waits in a separate exit category */
+               account_exit(vcpu, EMULATED_MTMSRWE_EXITS);
                kvm_vcpu_block(vcpu);
+       }
 }
 
 #endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S 
b/arch/powerpc/kvm/booke_interrupts.S
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -106,6 +106,18 @@
 
        li      r6, 1
        slw     r6, r6, r5
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       /* save exit time */
+..exit_tbu_overflow_loop:
+       mfspr   r7, SPRN_TBRU
+       mfspr   r8, SPRN_TBRL
+       mfspr   r9, SPRN_TBRU
+       cmpw    r9, r7
+       bne     ..exit_tbu_overflow_loop
+       stw     r8, VCPU_TIMING_EXIT_TBL(r4)
+       stw     r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
 
        /* Save the faulting instruction and all GPRs for emulation. */
        andi.   r7, r6, NEED_INST_MASK
@@ -423,6 +435,18 @@
        lwz     r3, VCPU_SPRG7(r4)
        mtspr   SPRN_SPRG7, r3
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       /* save enter time */
+..enter_tbu_overflow_loop:
+       mfspr   r6, SPRN_TBRU
+       mfspr   r7, SPRN_TBRL
+       mfspr   r8, SPRN_TBRU
+       cmpw    r8, r6
+       bne     ..enter_tbu_overflow_loop
+       stw     r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
+       stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
        /* Finish loading guest volatiles and jump to guest. */
        lwz     r3, VCPU_CTR(r4)
        mtctr   r3
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,7 +28,10 @@
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/tlbflush.h>
-
+#include <asm/kvm_timing_stats.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
@@ -101,6 +104,153 @@
        *(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
+       "MMIO",
+       "DCR",
+       "SIGNAL",
+       "ITLBREAL",
+       "ITLBVIRT",
+       "DTLBREAL",
+       "DTLBVIRT",
+       "SYSCALL",
+       "ISI",
+       "DSI",
+       "EMULINST",
+       "EMUL_MSR_WE",
+       "DEC",
+       "EXTINT",
+       "HALT",
+       "USR_PR_INST",
+       "FP_UNAVAIL",
+       "DEBUG",
+       "TIMEINGUEST"
+};
+
+/* assign a unique number to each vm created */
+static atomic_t vm_count = ATOMIC_INIT(0);
+
+static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
+{
+       struct kvm_vcpu *vcpu = m->private;
+       int i;
+       u64 min, max;
+
+       for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+               /* map the "never hit" sentinel to 0 for readable output */
+               if (vcpu->arch.timing_min_duration[i] == 0xFFFFFFFF)
+                       min = 0;
+               else
+                       min = vcpu->arch.timing_min_duration[i];
+               if (vcpu->arch.timing_max_duration[i] == 0)
+                       max = 0;
+               else
+                       max = vcpu->arch.timing_max_duration[i];
+
+               /* print the sanitized min/max, not the raw arrays */
+               seq_printf(m, "%12s: count %10d min %10lld "
+                       "max %10lld sum %20lld sum_quad %20lld\n",
+                       kvm_exit_names[i], vcpu->arch.timing_count_type[i],
+                       min, max,
+                       vcpu->arch.timing_sum_duration[i],
+                       vcpu->arch.timing_sum_quad_duration[i]);
+       }
+       return 0;
+}
+
+static ssize_t kvmppc_exit_timing_write(struct file *file,
+                                      const char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       size_t len;
+       int err;
+       const char __user *p;
+       char c;
+
+       /* measure the input; anything longer than one char is invalid */
+       len = 0;
+       p = user_buf;
+       while (len < count) {
+               if (get_user(c, p++))
+                       return -EFAULT; /* propagate fault, don't continue */
+               if (c == 0 || c == '\n')
+                       break;
+               len++;
+       }
+
+       if (len > 1) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       if (copy_from_user(&c, user_buf, sizeof(c))) {
+               err = -EFAULT;
+               goto done;
+       }
+
+       if (c == 'c') {
+               struct seq_file *seqf = (struct seq_file *)file->private_data;
+               struct kvm_vcpu *vcpu = seqf->private;
+               /* write does not affect our buffers previously generated with
+                * show. Seq file is locked here to prevent races of init with
+                * a show call */
+               mutex_lock(&seqf->lock);
+               init_timing_stats(vcpu);
+               mutex_unlock(&seqf->lock);
+               err = count;
+       } else {
+               err = -EINVAL;
+               goto done;
+       }
+
+done:
+       return err;
+}
+
+static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+}
+
+static struct file_operations kvmppc_exit_timing_fops = {
+       .owner   = THIS_MODULE,
+       .open    = kvmppc_exit_timing_open,
+       .read    = seq_read,
+       .write   = kvmppc_exit_timing_write,
+       .llseek  = seq_lseek,
+       .release = single_release,
+};
+
+static void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
+{
+       /* automatic buffer: a static one would race on concurrent creation */
+       char dbg_fname[32];
+       struct dentry *debugfs_file;
+
+       snprintf(dbg_fname, sizeof(dbg_fname), "VM_%03u_VCPU_%03u_exit_timing",
+                vcpu->kvm->arch.vm_id, id);
+       debugfs_file = debugfs_create_file(dbg_fname, 0666,
+                                       kvm_debugfs_dir, vcpu,
+                                       &kvmppc_exit_timing_fops);
+
+       if (!debugfs_file) {
+               printk(KERN_ERR"%s: error creating debugfs file %s\n",
+                       __func__, dbg_fname);
+               return;
+       }
+
+       vcpu->arch.debugfs_exit_timing = debugfs_file;
+}
+
+static void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.debugfs_exit_timing) {
+               debugfs_remove(vcpu->arch.debugfs_exit_timing);
+               vcpu->arch.debugfs_exit_timing = NULL;
+       }
+}
+#else
+#define kvmppc_create_vcpu_debugfs(x, y) do { } while (0)
+#define kvmppc_remove_vcpu_debugfs(x) do { } while (0)
+#endif /* CONFIG_KVM_BOOKE_EXIT_TIMING */
+
 struct kvm *kvm_arch_create_vm(void)
 {
        struct kvm *kvm;
@@ -108,6 +258,10 @@
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                return ERR_PTR(-ENOMEM);
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+       kvm->arch.vm_id = atomic_inc_return(&vm_count);
+#endif
 
        return kvm;
 }
@@ -170,11 +324,15 @@
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-       return kvmppc_core_vcpu_create(kvm, id);
+       struct kvm_vcpu *vcpu;
+       vcpu = kvmppc_core_vcpu_create(kvm, id);
+       kvmppc_create_vcpu_debugfs(vcpu, id);
+       return vcpu;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+       kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
 }
 
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to