From: Joerg Roedel <[email protected]>

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>

diff --git a/kvm/test/x86/cstart64.S b/kvm/test/x86/cstart64.S
index f1a9d09..46e9d5c 100644
--- a/kvm/test/x86/cstart64.S
+++ b/kvm/test/x86/cstart64.S
@@ -51,6 +51,11 @@ gdt64:
        .quad 0x00cf93000000ffff // 64-bit data segment
        .quad 0x00affb000000ffff // 64-bit code segment (user)
        .quad 0x00cff3000000ffff // 64-bit data segment (user)
+       .quad 0x00cf9b000000ffff // 32-bit code segment
+       .quad 0x00cf92000000ffff // 32-bit data segment
+       .quad 0x008F9A000000FFFF // 16-bit code segment
+       .quad 0x008F92000000FFFF // 16-bit data segment
+
 tss_descr:
        .rept max_cpus
        .quad 0x000089000000ffff // 64-bit avail tss
diff --git a/kvm/test/x86/svm.c b/kvm/test/x86/svm.c
index 4a7a662..fd98505 100644
--- a/kvm/test/x86/svm.c
+++ b/kvm/test/x86/svm.c
@@ -4,6 +4,7 @@
 #include "msr.h"
 #include "vm.h"
 #include "smp.h"
+#include "types.h"
 
 static void setup_svm(void)
 {
@@ -235,6 +236,112 @@ static bool check_next_rip(struct test *test)
     return address == test->vmcb->control.next_rip;
 }
 
+static void prepare_mode_switch(struct test *test)
+{
+    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
+                                             |  (1ULL << UD_VECTOR)
+                                             |  (1ULL << DF_VECTOR)
+                                             |  (1ULL << PF_VECTOR);
+    test->scratch = 0;
+}
+
+static void test_mode_switch(struct test *test)
+{
+    asm volatile("     cli\n"
+                "      ljmp *1f\n" /* jump to 32-bit code segment */
+                "1:\n"
+                "      .long 2f\n"
+                "      .long 40\n"
+                ".code32\n"
+                "2:\n"
+                "      movl %%cr0, %%eax\n"
+                "      btcl  $31, %%eax\n" /* clear PG */
+                "      movl %%eax, %%cr0\n"
+                "      movl $0xc0000080, %%ecx\n" /* EFER */
+                "      rdmsr\n"
+                "      btcl $8, %%eax\n" /* clear LME */
+                "      wrmsr\n"
+                "      movl %%cr4, %%eax\n"
+                "      btcl $5, %%eax\n" /* clear PAE */
+                "      movl %%eax, %%cr4\n"
+                "      movw $64, %%ax\n"
+                "      movw %%ax, %%ds\n"
+                "      ljmpl $56, $3f\n" /* jump to 16 bit protected-mode */
+                ".code16\n"
+                "3:\n"
+                "      movl %%cr0, %%eax\n"
+                "      btcl $0, %%eax\n" /* clear PE  */
+                "      movl %%eax, %%cr0\n"
+                "      ljmpl $0, $4f\n"   /* jump to real-mode */
+                "4:\n"
+                "      vmmcall\n"
+                "      movl %%cr0, %%eax\n"
+                "      btsl $0, %%eax\n" /* set PE  */
+                "      movl %%eax, %%cr0\n"
+                "      ljmpl $40, $5f\n" /* back to protected mode */
+                ".code32\n"
+                "5:\n"
+                "      movl %%cr4, %%eax\n"
+                "      btsl $5, %%eax\n" /* set PAE */
+                "      movl %%eax, %%cr4\n"
+                "      movl $0xc0000080, %%ecx\n" /* EFER */
+                "      rdmsr\n"
+                "      btsl $8, %%eax\n" /* set LME */
+                "      wrmsr\n"
+                "      movl %%cr0, %%eax\n"
+                "      btsl  $31, %%eax\n" /* set PG */
+                "      movl %%eax, %%cr0\n"
+                "      ljmpl $8, $6f\n"    /* back to long mode */
+                ".code64\n\t"
+                "6:\n"
+                "      vmmcall\n"
+                ::: "rax", "rbx", "rcx", "rdx", "memory");
+}
+
+static bool mode_switch_finished(struct test *test)
+{
+    u64 cr0, cr4, efer;
+
+    cr0  = test->vmcb->save.cr0;
+    cr4  = test->vmcb->save.cr4;
+    efer = test->vmcb->save.efer;
+
+    /* Only expect VMMCALL intercepts */
+    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
+           return true;
+
+    /* Jump over VMMCALL instruction */
+    test->vmcb->save.rip += 3;
+
+    /* Do sanity checks */
+    switch (test->scratch) {
+    case 0:
+        /* Test should be in real mode now - check for this */
+        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
+            (cr4  & 0x00000020) || /* CR4.PAE */
+            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
+                return true;
+        break;
+    case 2:
+        /* Test should be back in long-mode now - check for this */
+        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
+            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
+            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
+                return true;
+        break;
+    }
+
+    /* one step forward */
+    test->scratch += 1;
+
+    return test->scratch == 2;
+}
+
+static bool check_mode_switch(struct test *test)
+{
+       return test->scratch == 2;
+}
+
 static struct test tests[] = {
     { "null", default_supported, default_prepare, null_test,
       default_finished, null_check },
@@ -251,6 +358,8 @@ static struct test tests[] = {
       default_finished, check_cr3_intercept },
     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
       default_finished, check_next_rip },
+    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
+       mode_switch_finished, check_mode_switch },
 
 };
 
diff --git a/kvm/test/x86/types.h b/kvm/test/x86/types.h
new file mode 100644
index 0000000..fd22743
--- /dev/null
+++ b/kvm/test/x86/types.h
@@ -0,0 +1,20 @@
+#ifndef __TYPES_H
+#define __TYPES_H
+
+#define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
+#define UD_VECTOR 6
+#define NM_VECTOR 7
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+#define MF_VECTOR 16
+#define MC_VECTOR 18
+
+#endif
--
To unsubscribe from this list: send the line "unsubscribe kvm-commits" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to