This patch implements the following tests for kvmclock:
- A simple wall clock test to check whether a consistent value was
returned.
- A monotonic cycle test to check whether the kvmclock driver could
return monotonic cycles in SMP guests.
- A raw cycle test to check whether the hypervisor could provide
monotonic cycles.

Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 kvm/test/config-x86-common.mak |    6 +++++-
 kvm/test/x86/kvmclock_test.c   |  128 ++++++++++++++++++++++++++++++++++++++++++
 kvm/test/x86/unittests.cfg     |    4 ++++
 3 files changed, 137 insertions(+), 1 deletions(-)
 create mode 100644 kvm/test/x86/kvmclock_test.c

diff --git a/kvm/test/config-x86-common.mak b/kvm/test/config-x86-common.mak
index a6ee18c..3dfd450 100644
--- a/kvm/test/config-x86-common.mak
+++ b/kvm/test/config-x86-common.mak
@@ -25,7 +25,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+              $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -67,6 +68,9 @@ $(TEST_DIR)/xsave.flat: $(cstart.o) $(TEST_DIR)/idt.o 
$(TEST_DIR)/xsave.o
 $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
                             $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                               $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
        $(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
        $(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/kvm/test/x86/kvmclock_test.c b/kvm/test/x86/kvmclock_test.c
new file mode 100644
index 0000000..8530a89
--- /dev/null
+++ b/kvm/test/x86/kvmclock_test.c
@@ -0,0 +1,128 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "kvmclock.h"
+
+#define TEST_LOOPS 100000000
+
+static inline int atomic_read(int *v)
+{
+    return (*(volatile int *)v); /* was &(v): that read the pointer itself, not the counter */
+}
+
+static inline void atomic_dec(int *v)
+{
+    asm volatile("lock decl %0": "+m" (*v)); /* atomically decrement *v (SMP-safe via lock prefix) */
+}
+
+struct test_info {
+    struct spinlock lock;     /* serializes access to last/warps/worst */
+    u64 loops;                /* test loops */
+    u64 warps;                /* warp count */
+    long long worst;          /* worst warp */
+    volatile cycle_t last;    /* last cycle seen by test */
+    int ncpus;                /* number of cpu in the test*/
+};
+
+struct test_info ti[2];       /* [0]: monotonic cycle test, [1]: raw cycle test */
+
+/* Read the wall clock once, then verify 100 further reads return the
+ * identical value.  Returns 0 on success, 1 if any read disagrees. */
+static int wallclock_test()
+{
+    struct timespec expected, now;
+    int iter;
+
+    kvm_get_wallclock(&expected);
+
+    for (iter = 0; iter < 100; ++iter) {
+       kvm_get_wallclock(&now);
+       if (now.nsec != expected.nsec || now.sec != expected.sec) {
+           printf ("Inconsistent wall clock returned!\n");
+           return 1;
+       }
+    }
+    return 0;
+}
+
+/*
+ * Per-vcpu worker: repeatedly read kvmclock and count backward warps.
+ * Runs hv_test_info->loops iterations, then signals completion by
+ * atomically decrementing hv_test_info->ncpus.
+ */
+static void kvm_clock_test(void *data)
+{
+    struct test_info *hv_test_info = (struct test_info *)data;
+    u64 i;   /* u64 to match loops; unused smp_id() local removed */
+
+    for (i = 0; i < hv_test_info->loops; i++){
+       cycle_t t0, t1;
+       long long delta;
+
+       spin_lock(&hv_test_info->lock);
+       t1 = kvm_clock_read();
+       t0 = hv_test_info->last;
+       hv_test_info->last = kvm_clock_read();
+       spin_unlock(&hv_test_info->lock);
+
+       /* negative delta means the clock went backwards vs. the last
+        * globally observed value: a warp */
+       delta = t1 - t0;
+       if (delta < 0){
+           spin_lock(&hv_test_info->lock);
+           ++hv_test_info->warps;
+           if (delta < hv_test_info->worst){
+               hv_test_info->worst = delta;
+               /* fixed format string: stray '%' after %lld was undefined */
+               printf("Worst warp %lld\n", hv_test_info->worst);
+           }
+           spin_unlock(&hv_test_info->lock);
+       }
+
+       /* brief pause every 32 iterations to vary interleaving */
+       if (!((unsigned long)i & 31))
+           asm volatile("rep; nop");
+    }
+
+    atomic_dec(&hv_test_info->ncpus);
+}
+
+/*
+ * Run kvm_clock_test on 'ncpus' vcpus, 'loops' iterations each, then
+ * report warp statistics.  Returns 1 if any warp was seen, else 0.
+ */
+static int cycle_test(int ncpus, int loops, struct test_info *ti)
+{
+    int i;
+
+    ti->ncpus = ncpus;
+    ti->loops = loops;
+    for (i = ncpus - 1; i >= 0; --i)
+       on_cpu_async(i, kvm_clock_test, (void *)ti);
+
+    /* Wait until every vcpu has decremented ti->ncpus to zero.
+     * (The original condition was inverted and returned immediately,
+     * printing stats before the workers had run.) */
+    while (atomic_read(&ti->ncpus))
+       ;
+
+    printf("Total vcpus: %d\n", ncpus);
+    printf("Total loops: %lld\n", ti->loops * ncpus);
+    printf("Total warps: %lld\n", ti->warps);
+    printf("Worst warp:  %lld\n", ti->worst);
+
+    return ti->warps ? 1 : 0;
+}
+
+/* Entry point: init kvmclock on each vcpu, run the wall clock test and
+ * both cycle tests, tear down, and return 1 on any failure. */
+int main()
+{
+    int nerr = 0;
+    int cpu, ncpus = cpu_count();
+
+    smp_init();
+
+    if (ncpus > MAX_CPU)
+       ncpus = MAX_CPU;
+    for (cpu = 0; cpu < ncpus; ++cpu)
+       on_cpu(cpu, kvm_clock_init, (void *)0);
+
+    printf("Wallclock test:\n");
+    nerr += wallclock_test();
+
+    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+    printf("Monotonic cycle test:\n");
+    nerr += cycle_test(ncpus, TEST_LOOPS, &ti[0]);
+
+    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                     | PVCLOCK_CYCLE_RAW_TEST_BIT);
+    printf("Raw cycle test:\n");
+    nerr += cycle_test(ncpus, TEST_LOOPS, &ti[1]);
+
+    for (cpu = 0; cpu < ncpus; ++cpu)
+       on_cpu(cpu, kvm_clock_clear, (void *)0);
+
+    return nerr > 0 ? 1 : 0;
+}
diff --git a/kvm/test/x86/unittests.cfg b/kvm/test/x86/unittests.cfg
index f39c5bd..0d077ac 100644
--- a/kvm/test/x86/unittests.cfg
+++ b/kvm/test/x86/unittests.cfg
@@ -53,3 +53,7 @@ file = xsave.flat
 
 [rmap_chain]
 file = rmap_chain.flat
+
+[kvmclock]
+file = kvmclock_test.flat
+smp = 2

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to