diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 1bbafba..08dd73f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -287,6 +287,7 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	unsigned long smsrs_bitmap; /* bits index guest_msrs[]; bitops need unsigned long */
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1288cff..44d8bc4 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1596,21 +1596,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void load_msrs(struct vmx_msr_entry *e, int n)
+void load_msrs(struct vmx_msr_entry *e, int bitmap)
 {
-	int i;
+	/* Bitops operate on unsigned long words; never take &bitmap (int). */
+	unsigned long map = bitmap;
+	unsigned long nr;
 
-	for (i = 0; i < n; ++i)
-		wrmsrl(e[i].index, e[i].data);
+	while (map) {
+		nr = __ffs(map);
+		wrmsrl(e[nr].index, e[nr].data);
+		__clear_bit(nr, &map);
+	}
 }
 EXPORT_SYMBOL_GPL(load_msrs);
 
-void save_msrs(struct vmx_msr_entry *e, int n)
+void save_msrs(struct vmx_msr_entry *e, int bitmap)
 {
-	int i;
+	/* Bitops operate on unsigned long words; never take &bitmap (int). */
+	unsigned long map = bitmap;
+	unsigned long nr;
 
-	for (i = 0; i < n; ++i)
-		rdmsrl(e[i].index, e[i].data);
+	while (map) {
+		nr = __ffs(map);
+		rdmsrl(e[nr].index, e[nr].data);
+		__clear_bit(nr, &map);
+	}
 }
 EXPORT_SYMBOL_GPL(save_msrs);
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 804a623..0c69fe4 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -86,15 +86,6 @@ static const u32 vmx_msr_index[] = {
 
 #ifdef CONFIG_X86_64
 static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
 #endif
 
 static inline int is_page_fault(u32 intr_info)
@@ -117,13 +108,23 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
 	int i;
 
 	for (i = 0; i < vcpu->nmsrs; ++i)
 		if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
+			return i;
+	return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+	int i;
+
+	i = __find_msr_index(vcpu, msr);
+	if (i >= 0)
+		return &vcpu->guest_msrs[i];
 	return NULL;
 }
 
@@ -306,10 +307,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-	}
+	if (is_long_mode(vcpu))
+		save_msrs(vcpu->host_msrs, 1 << msr_offset_kernel_gs_base);
 #endif
+	load_msrs(vcpu->guest_msrs, vcpu->smsrs_bitmap);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
@@ -336,12 +337,8 @@
 
 		reload_tss();
 	}
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs(vcpu->guest_msrs, vcpu->smsrs_bitmap);
+	load_msrs(vcpu->host_msrs, vcpu->smsrs_bitmap);
 }
 
 /*
@@ -469,35 +466,46 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
-
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
+	int index, nr_msrs;
 
-	/*
-	 * MSR_K6_STAR is only needed on long mode guests, and only
-	 * if efer.sce is enabled.
-	 */
-	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
-#ifdef CONFIG_X86_64
-		if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-			++nr_good_msrs;
+	vcpu->smsrs_bitmap = 0;
+	if (is_long_mode(vcpu)) {
+		if ((index = __find_msr_index(vcpu, MSR_SYSCALL_MASK)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_LSTAR)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_CSTAR)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		/*
+		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * if efer.sce is enabled.
+		 */
+		if ((index = __find_msr_index(vcpu, MSR_K6_STAR)) >= 0
+#ifdef CONFIG_X86_64
+			&& (vcpu->shadow_efer & EFER_SCE)
 #endif
+			) {
+			set_bit(index, &vcpu->smsrs_bitmap);
+		}
 	}
 
+	if ((index = __find_msr_index(vcpu, MSR_EFER)) >= 0) {
+		nr_msrs = 1;
+	} else {
+		nr_msrs = 0;
+		index = 0;
+	}
 	vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+		    virt_to_phys(vcpu->host_msrs + index));
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_msrs); /* 22.2.2 */
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_msrs);  /* 22.2.2 */
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_msrs); /* 22.2.2 */
 }
 
 /*
@@ -594,14 +607,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
-	case MSR_LSTAR:
-	case MSR_SYSCALL_MASK:
-		msr = find_msr_entry(vcpu, msr_index);
-		if (msr)
-			msr->data = data;
-		if (vcpu->vmx_host_state.loaded)
-			load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -619,6 +624,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
+			if (vcpu->vmx_host_state.loaded)
+				load_msrs(vcpu->guest_msrs,
+					  vcpu->smsrs_bitmap);
 			break;
 		}
 		return kvm_set_msr_common(vcpu, msr_index, data);
