Just like with cpuid_cache, it makes no sense to call KVM_GET_SUPPORTED_HV_CPUID more than once. Instead of (ab)using env->features[] and/or trying to keep all the code in one place, it is better to introduce a persistent hv_cpuid_cache and an hv_cpuid_get_host() accessor to it.
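For readers skimming the archive, the overall shape of the change is sketched below. This is a rough, self-contained illustration of the same query-once/cache/accessor pattern, not code from this patch: the types, the query_host_hv_cpuid() helper and the hard-coded leaf values are made-up stand-ins for KVM_GET_SUPPORTED_HV_CPUID and QEMU's struct kvm_cpuid2 handling.

/*
 * Standalone sketch of the pattern this patch introduces: query the
 * hypervisor CPUID leaves once, keep them in a persistent cache and read
 * individual registers through a single accessor that returns 0 for
 * missing leaves.  All names and values below are simplified stand-ins,
 * not QEMU/KVM code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

struct cpuid_entry {
    uint32_t function;
    uint32_t eax, ebx, ecx, edx;
};

struct cpuid_table {
    size_t nent;
    struct cpuid_entry entries[];
};

/* Persistent cache, filled on first use and reused by all later callers. */
static struct cpuid_table *hv_cpuid_cache;

/* Stand-in for the expensive host query (KVM_GET_SUPPORTED_HV_CPUID). */
static struct cpuid_table *query_host_hv_cpuid(void)
{
    struct cpuid_table *t = malloc(sizeof(*t) + sizeof(struct cpuid_entry));

    if (!t) {
        return NULL;
    }
    t->nent = 1;
    t->entries[0] = (struct cpuid_entry) {
        .function = 0x40000003, .eax = 0x1f, .edx = 0x2,
    };
    return t;
}

/* Accessor: lazily fill the cache, then return the requested register. */
static uint32_t hv_cpuid_get_host(uint32_t func, int reg)
{
    size_t i;

    if (!hv_cpuid_cache) {
        hv_cpuid_cache = query_host_hv_cpuid();
    }
    if (!hv_cpuid_cache) {
        return 0;
    }

    for (i = 0; i < hv_cpuid_cache->nent; i++) {
        const struct cpuid_entry *e = &hv_cpuid_cache->entries[i];

        if (e->function != func) {
            continue;
        }
        switch (reg) {
        case REG_EAX: return e->eax;
        case REG_EBX: return e->ebx;
        case REG_ECX: return e->ecx;
        case REG_EDX: return e->edx;
        }
    }
    return 0; /* missing leaf or register reads as all zeroes */
}

int main(void)
{
    /* Both reads hit the same cached table; the host is queried only once. */
    printf("features eax: 0x%x\n", (unsigned)hv_cpuid_get_host(0x40000003, REG_EAX));
    printf("features edx: 0x%x\n", (unsigned)hv_cpuid_get_host(0x40000003, REG_EDX));
    return 0;
}

In the actual patch the cache holds the struct kvm_cpuid2 returned by KVM (or by the legacy fallback), and cpuid_find_entry()/cpuid_entry_get_reg() do the lookup.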
Note, hv_cpuid_get_fw() is converted to using hv_cpuid_get_host() just to be
removed later with Hyper-V specific feature words.

Signed-off-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
---
 target/i386/kvm.c | 213 +++++++++++++++++++++++-----------------------
 1 file changed, 106 insertions(+), 107 deletions(-)

diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index a9823d4af7cb..0e6eef6a52c2 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -124,6 +124,7 @@ static int has_exception_payload;
 static bool has_msr_mcg_ext_ctl;
 
 static struct kvm_cpuid2 *cpuid_cache;
+static struct kvm_cpuid2 *hv_cpuid_cache;
 static struct kvm_msr_list *kvm_feature_msrs;
 
 int kvm_has_pit_state2(void)
@@ -1079,9 +1080,36 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
     return cpuid;
 }
 
-static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
+static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
 {
     struct kvm_cpuid_entry2 *entry;
+    struct kvm_cpuid2 *cpuid;
+
+    if (hv_cpuid_cache) {
+        cpuid = hv_cpuid_cache;
+    } else {
+        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
+            cpuid = get_supported_hv_cpuid(cs);
+        } else {
+            cpuid = get_supported_hv_cpuid_legacy(cs);
+        }
+        hv_cpuid_cache = cpuid;
+    }
+
+    if (!cpuid) {
+        return 0;
+    }
+
+    entry = cpuid_find_entry(cpuid, func, 0);
+    if (!entry) {
+        return 0;
+    }
+
+    return cpuid_entry_get_reg(entry, reg);
+}
+
+static uint32_t hv_cpuid_get_fw(CPUState *cs, int fw)
+{
     uint32_t func;
     int reg;
 
@@ -1099,30 +1127,13 @@ static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
         func = HV_CPUID_ENLIGHTMENT_INFO;
         break;
     default:
-        return -EINVAL;
-    }
-
-    entry = cpuid_find_entry(cpuid, func, 0);
-    if (!entry) {
-        return -ENOENT;
-    }
-
-    switch (reg) {
-    case R_EAX:
-        *r = entry->eax;
-        break;
-    case R_EDX:
-        *r = entry->edx;
-        break;
-    default:
-        return -EINVAL;
+        return 0;
     }
 
-    return 0;
+    return hv_cpuid_get_host(cs, func, reg);
 }
 
-static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
-                                  int feature)
+static int hv_cpuid_check_and_set(CPUState *cs, int feature)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
@@ -1155,7 +1166,8 @@ static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
             continue;
         }
 
-        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
+        r = hv_cpuid_get_fw(cs, fw);
+        if ((r & bits) != bits) {
             if (hyperv_feat_enabled(cpu, feature)) {
                 fprintf(stderr,
                         "Hyper-V %s is not supported by kernel\n",
@@ -1186,7 +1198,6 @@ static int hyperv_handle_properties(CPUState *cs,
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    struct kvm_cpuid2 *cpuid;
     struct kvm_cpuid_entry2 *c;
     uint32_t cpuid_i = 0;
     int r;
@@ -1214,92 +1225,85 @@ static int hyperv_handle_properties(CPUState *cs,
         }
     }
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
-        cpuid = get_supported_hv_cpuid(cs);
-    } else {
-        cpuid = get_supported_hv_cpuid_legacy(cs);
-    }
-
     if (cpu->hyperv_passthrough) {
-        c = cpuid_find_entry(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 0);
-        if (c) {
-            cpu->hyperv_vendor_id[0] = c->ebx;
-            cpu->hyperv_vendor_id[1] = c->ecx;
-            cpu->hyperv_vendor_id[2] = c->edx;
-            cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
-                                           sizeof(cpu->hyperv_vendor_id) + 1);
-            memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
-                   sizeof(cpu->hyperv_vendor_id));
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_INTERFACE, 0);
-        if (c) {
-            cpu->hyperv_interface_id[0] = c->eax;
-            cpu->hyperv_interface_id[1] = c->ebx;
-            cpu->hyperv_interface_id[2] = c->ecx;
-            cpu->hyperv_interface_id[3] = c->edx;
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_VERSION, 0);
-        if (c) {
-            cpu->hyperv_version_id[0] = c->eax;
-            cpu->hyperv_version_id[1] = c->ebx;
-            cpu->hyperv_version_id[2] = c->ecx;
-            cpu->hyperv_version_id[3] = c->edx;
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
-        if (c) {
-            env->features[FEAT_HYPERV_EAX] = c->eax;
-            env->features[FEAT_HYPERV_EBX] = c->ebx;
-            env->features[FEAT_HYPERV_EDX] = c->edx;
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_IMPLEMENT_LIMITS, 0);
-        if (c) {
-            cpu->hv_max_vps = c->eax;
-            cpu->hyperv_limits[0] = c->ebx;
-            cpu->hyperv_limits[1] = c->ecx;
-            cpu->hyperv_limits[2] = c->edx;
-        }
-
-        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
-        if (c) {
-            env->features[FEAT_HV_RECOMM_EAX] = c->eax;
-            cpu->hyperv_spinlock_attempts = c->ebx;
-        }
-        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
-        if (c) {
-            env->features[FEAT_HV_NESTED_EAX] = c->eax;
-        }
+        cpu->hyperv_vendor_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
+        cpu->hyperv_vendor_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
+        cpu->hyperv_vendor_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
+        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
+                                       sizeof(cpu->hyperv_vendor_id) + 1);
+        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
+               sizeof(cpu->hyperv_vendor_id));
+
+        cpu->hyperv_interface_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
+        cpu->hyperv_interface_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
+        cpu->hyperv_interface_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
+        cpu->hyperv_interface_id[3] =
+            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
+
+        cpu->hyperv_version_id[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
+        cpu->hyperv_version_id[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX);
+        cpu->hyperv_version_id[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
+        cpu->hyperv_version_id[3] =
+            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX);
+
+        env->features[FEAT_HYPERV_EAX] =
+            hv_cpuid_get_host(cs, HV_CPUID_FEATURES, R_EAX);
+        env->features[FEAT_HYPERV_EBX] =
+            hv_cpuid_get_host(cs, HV_CPUID_FEATURES, R_EBX);
+        env->features[FEAT_HYPERV_EDX] =
+            hv_cpuid_get_host(cs, HV_CPUID_FEATURES, R_EDX);
+
+        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
+                                            R_EAX);
+        cpu->hyperv_limits[0] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
+        cpu->hyperv_limits[1] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
+        cpu->hyperv_limits[2] =
+            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
+
+        env->features[FEAT_HV_RECOMM_EAX] =
+            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
+        cpu->hyperv_spinlock_attempts =
+            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
+
+        env->features[FEAT_HV_NESTED_EAX] =
+            hv_cpuid_get_host(cs, HV_CPUID_NESTED_FEATURES, R_EAX);
     }
 
     if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
         env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
     } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
-        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
-        if (c) {
-            env->features[FEAT_HV_RECOMM_EAX] |=
-                c->eax & HV_NO_NONARCH_CORESHARING;
-        }
+        env->features[FEAT_HV_RECOMM_EAX] |=
+            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
+            HV_NO_NONARCH_CORESHARING;
     }
 
     /* Features */
-    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
-    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);
+    r = hv_cpuid_check_and_set(cs, HYPERV_FEAT_RELAXED);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_VAPIC);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_TIME);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_CRASH);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_RESET);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_VPINDEX);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_RUNTIME);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_SYNIC);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_FREQUENCIES);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_REENLIGHTENMENT);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_TLBFLUSH);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_EVMCS);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_IPI);
+    r |= hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER_DIRECT);
 
     /* Additional dependencies not covered by kvm_hyperv_properties[] */
     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
@@ -1315,8 +1319,7 @@ static int hyperv_handle_properties(CPUState *cs,
             env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
 
     if (r) {
-        r = -ENOSYS;
-        goto free;
+        return -ENOSYS;
     }
 
     c = &cpuid_ent[cpuid_i++];
@@ -1373,12 +1376,8 @@ static int hyperv_handle_properties(CPUState *cs,
         c->function = HV_CPUID_NESTED_FEATURES;
         c->eax = env->features[FEAT_HV_NESTED_EAX];
     }
-    r = cpuid_i;
 
-free:
-    g_free(cpuid);
-
-    return r;
+    return cpuid_i;
 }
 
 static Error *hv_passthrough_mig_blocker;
-- 
2.25.4