The following patch saves the host FPU state and loads the guest's FPU state only if !(CR0.MP == 1 && CR0.TS == 1).

When CR0.MP == 1 && CR0.TS == 1, all FPU activity generates exceptions. OSes use these exceptions to implement lazy FPU loading and improve context-switch time. Since any FPU activity traps in this state, we don't have to worry about the guest modifying the host FPU state.
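For context, here is a rough sketch of the lazy-FPU pattern the guest is assumed to rely on. This is a purely illustrative user-space model, not kernel code; the names cr0, device_not_available and fpu_insn are made up for the example:

/* Illustrative model of lazy FPU loading via CR0.TS, as described above. */
#include <stdio.h>

#define CR0_MP (1u << 1)
#define CR0_TS (1u << 3)

/* A context switch leaves TS (and MP) set, so the new task's FPU state
 * is not loaded eagerly. */
static unsigned int cr0 = CR0_MP | CR0_TS;

/* #NM-style handler: the first FPU use after a switch clears TS and
 * reloads the FPU state for the current task. */
static void device_not_available(void)
{
	cr0 &= ~CR0_TS;		/* clts: further FPU use no longer traps */
	printf("reload FPU state for current task\n");
}

static void fpu_insn(void)
{
	if ((cr0 & CR0_TS) && (cr0 & CR0_MP))
		device_not_available();	/* trap before the instruction runs */
	printf("execute FPU instruction\n");
}

int main(void)
{
	fpu_insn();	/* traps, FPU state gets reloaded */
	fpu_insn();	/* no trap: TS already cleared */
	return 0;
}

While TS && MP holds, the guest cannot touch the FPU without trapping first, which is the same condition the patch tracks in fx_active to decide whether the save/restore around guest entry is needed.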

My microbenchmark of choice uses FPU operations, so I think my current results are tainted. I've only tested on a 32-bit SVM system.

Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>

Regards,

Anthony Liguori


diff -r 962b18bbd6d9 kernel/kvm.h
--- a/kernel/kvm.h	Thu Feb 15 13:58:59 2007 +0000
+++ b/kernel/kvm.h	Thu Feb 15 13:23:56 2007 -0600
@@ -17,6 +17,7 @@
 #include <linux/kvm_para.h>
 
 #define CR0_PE_MASK (1ULL << 0)
+#define CR0_MP_MASK (1ULL << 1)
 #define CR0_TS_MASK (1ULL << 3)
 #define CR0_NE_MASK (1ULL << 5)
 #define CR0_WP_MASK (1ULL << 16)
@@ -263,6 +264,7 @@ struct kvm_vcpu {
 
 	struct kvm_guest_debug guest_debug;
 
+	int fx_active;
 	char fx_buf[FX_BUF_SIZE];
 	char *host_fx_image;
 	char *guest_fx_image;
diff -r 962b18bbd6d9 kernel/kvm_main.c
--- a/kernel/kvm_main.c	Thu Feb 15 13:58:59 2007 +0000
+++ b/kernel/kvm_main.c	Thu Feb 15 13:27:17 2007 -0600
@@ -404,6 +404,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsi
 
 	}
 
+	vcpu->fx_active = !((cr0 & CR0_TS_MASK) && (cr0 & CR0_MP_MASK));
 	kvm_arch_ops->set_cr0(vcpu, cr0);
 	vcpu->cr0 = cr0;
 
@@ -530,6 +531,7 @@ void fx_init(struct kvm_vcpu *vcpu)
 	fx_image->mxcsr = 0x1f80;
 	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
 	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+	vcpu->fx_active = 1;
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
diff -r 962b18bbd6d9 kernel/svm.c
--- a/kernel/svm.c	Thu Feb 15 13:58:59 2007 +0000
+++ b/kernel/svm.c	Thu Feb 15 13:30:52 2007 -0600
@@ -1461,8 +1461,10 @@ again:
 		load_db_regs(vcpu->svm->db_regs);
 	}
 
-	fx_save(vcpu->host_fx_image);
-	fx_restore(vcpu->guest_fx_image);
+	if (vcpu->fx_active) {
+		fx_save(vcpu->host_fx_image);
+		fx_restore(vcpu->guest_fx_image);
+	}
 
 	asm volatile (
 #ifdef CONFIG_X86_64
@@ -1573,8 +1575,10 @@ again:
 #endif
 		: "cc", "memory" );
 
-	fx_save(vcpu->guest_fx_image);
-	fx_restore(vcpu->host_fx_image);
+	if (vcpu->fx_active) {
+		fx_save(vcpu->guest_fx_image);
+		fx_restore(vcpu->host_fx_image);
+	}
 
 	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
 		load_db_regs(vcpu->svm->host_db_regs);
diff -r 962b18bbd6d9 kernel/vmx.c
--- a/kernel/vmx.c	Thu Feb 15 13:58:59 2007 +0000
+++ b/kernel/vmx.c	Thu Feb 15 13:30:20 2007 -0600
@@ -1764,8 +1764,10 @@ again:
 	if (vcpu->guest_debug.enabled)
 		kvm_guest_debug_pre(vcpu);
 
-	fx_save(vcpu->host_fx_image);
-	fx_restore(vcpu->guest_fx_image);
+	if (vcpu->fx_active) {
+		fx_save(vcpu->host_fx_image);
+		fx_restore(vcpu->guest_fx_image);
+	}
 
 	save_msrs(vcpu->host_msrs, vcpu->nmsrs);
 	load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
@@ -1894,8 +1896,11 @@ again:
 	save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
 	load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
 
-	fx_save(vcpu->guest_fx_image);
-	fx_restore(vcpu->host_fx_image);
+	if (vcpu->fx_active) {
+		fx_save(vcpu->guest_fx_image);
+		fx_restore(vcpu->host_fx_image);
+	}
+
 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));