On 18.11.2024 09:49, ngoc-tu.d...@vates.tech wrote:
> --- a/xen/arch/x86/cpu-policy.c
> +++ b/xen/arch/x86/cpu-policy.c
> @@ -788,6 +788,9 @@ static void __init calculate_hvm_max_policy(void)
>  
>          if ( !cpu_has_vmx_xsaves )
>              __clear_bit(X86_FEATURE_XSAVES, fs);
> +
> +        if ( !cpu_has_vmx_guest_lbr_ctl )
> +            __clear_bit(X86_FEATURE_ARCH_LBR, fs);

How will this be reflected onto leaf 1C? Patch 3 doesn't check this bit.
In fact I wonder whether patch 1 shouldn't introduce dependencies of all
leaf 1C bits upon this one bit (in tools/gen-cpuid.py).

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -423,65 +423,96 @@ static int cf_check vmx_pi_update_irte(const struct vcpu *v,
>      return rc;
>  }
>  
> -static const struct lbr_info {
> +struct lbr_info {
>      u32 base, count;
> -} p4_lbr[] = {
> -    { MSR_P4_LER_FROM_LIP,          1 },
> -    { MSR_P4_LER_TO_LIP,            1 },
> -    { MSR_P4_LASTBRANCH_TOS,        1 },
> -    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
> -    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
> -    { 0, 0 }
> +    u64 initial;

uint64_t please in new code.

> +};
> +
> +static const struct lbr_info p4_lbr[] = {
> +    { MSR_P4_LER_FROM_LIP,          1, 0 },
> +    { MSR_P4_LER_TO_LIP,            1, 0 },
> +    { MSR_P4_LASTBRANCH_TOS,        1, 0 },
> +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO, 0 },
> +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO, 0 },
> +    { 0, 0, 0 }

If these adjustments are really needed, I'd wonder whether we wouldn't be
better off switching to C99 initializers instead.
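
Something along these lines perhaps (untested, purely to illustrate the idea;
whether an explicit zero .initial is wanted at all is a separate question):

static const struct lbr_info p4_lbr[] = {
    { .base = MSR_P4_LER_FROM_LIP,          .count = 1 },
    { .base = MSR_P4_LER_TO_LIP,            .count = 1 },
    { .base = MSR_P4_LASTBRANCH_TOS,        .count = 1 },
    { .base = MSR_P4_LASTBRANCH_0_FROM_LIP, .count = NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { .base = MSR_P4_LASTBRANCH_0_TO_LIP,   .count = NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { 0 } /* Sentinel: .base == 0 && .count == 0. */
};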

> +static struct lbr_info __ro_after_init architectural_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,        1, 0 },
> +    { MSR_IA32_LASTINTTOIP,          1, 0 },
> +    { MSR_IA32_LER_INFO,             1, 0 },
> +    /* to be updated by update_arch_lbr */

Nit: Comment style (start with a capital letter).

> +    { MSR_IA32_LASTBRANCH_0_INFO,    MAX_MSR_ARCH_LASTBRANCH_FROM_TO, 0 },
> +    { MSR_IA32_LASTBRANCH_0_FROM_IP, MAX_MSR_ARCH_LASTBRANCH_FROM_TO, 0 },
> +    { MSR_IA32_LASTBRANCH_0_TO_IP,   MAX_MSR_ARCH_LASTBRANCH_FROM_TO, 0 },
> +    { 0, 0, 0 }
> +};
> +static uint64_t __ro_after_init host_lbr_depth = 0;

No need for the initializer.

> +static void __init update_arch_lbr(void)
> +{
> +    struct lbr_info *lbr = architectural_lbr;
> +
> +    if ( boot_cpu_has(X86_FEATURE_ARCH_LBR) )
> +        rdmsrl(MSR_IA32_LASTBRANCH_DEPTH, host_lbr_depth);

Again you're reading an MSR here which was never written. Are you perhaps
assuming that the reset value is still in place?

> +    ASSERT((host_lbr_depth % 8) == 0 && (host_lbr_depth <= 64));
> +
> +    for ( ; lbr->count; lbr++ ) {
> +        if ( lbr->base == MSR_IA32_LASTBRANCH_0_INFO ||
> +             lbr->base == MSR_IA32_LASTBRANCH_0_FROM_IP ||
> +             lbr->base == MSR_IA32_LASTBRANCH_0_TO_IP )
> +            lbr->count = (u32)host_lbr_depth;

You don't want to use presently undefined bits here which may happen to
become defined on future hardware. IOW a cast is insufficient here.
(Comment applies to patch 2 as well.)
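
Just to illustrate what I mean - a rough sketch only, and the exact width of
the defined DEPTH field wants checking against the SDM (the low-byte mask
below is merely an assumption):

    /*
     * Sketch: restrict to the architecturally defined DEPTH field rather
     * than truncating the raw MSR value with a cast.  The 0xff mask is an
     * assumption and needs verifying against the SDM.
     */
    unsigned int depth = host_lbr_depth & 0xff;

    if ( !depth || depth > 64 || (depth % 8) )
        return; /* Unexpected value - don't trust it. */

    for ( ; lbr->count; lbr++ )
        if ( lbr->base == MSR_IA32_LASTBRANCH_0_INFO ||
             lbr->base == MSR_IA32_LASTBRANCH_0_FROM_IP ||
             lbr->base == MSR_IA32_LASTBRANCH_0_TO_IP )
            lbr->count = depth;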

> @@ -3303,25 +3336,36 @@ static void __init ler_to_fixup_check(void)
>      }
>  }
>  
> -static int is_last_branch_msr(u32 ecx)
> +static const struct lbr_info * find_last_branch_msr(struct vcpu *v, u32 ecx)

Nit: Excess blank after the first *, and please take the opportunity to
switch to uint32_t.

>  {
> +    /*
> +     * Model-specific and architectural LBRs are mutually exclusive.
> +     * It's not necessary to check both lbr_info lists.
> +     */
>      const struct lbr_info *lbr = model_specific_lbr;
> +    const struct cpu_policy *cp = v->domain->arch.cpu_policy;
>  
> -    if ( lbr == NULL )
> -        return 0;
> +    if ( lbr == NULL ) {
> +        if ( cp->feat.arch_lbr )
> +            lbr = architectural_lbr;

I'm inclined to think that this should be independent of lbr being NULL.
That would then also eliminate the style issue (with the placement of the
figure brace).
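
I.e. something more along these lines (again only a sketch):

    const struct cpu_policy *cp = v->domain->arch.cpu_policy;
    const struct lbr_info *lbr = model_specific_lbr;

    /*
     * Select the architectural table purely from the policy, not as a
     * fallback for model_specific_lbr being NULL.
     */
    if ( cp->feat.arch_lbr )
        lbr = architectural_lbr;

    if ( !lbr )
        return NULL;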

By the end of the patch / series, what I'm missing are context switch and
migration handling. You want to engage XSAVES to cover both (it being the
first XSS component we support, there'll be prereq work necessary, as I
think Andrew did already point out). Or did I overlook anything?

Jan
