On 11/06/2025 6:16 pm, Roger Pau Monne wrote:
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 7536b6c8717e..2f438ce367cf 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -461,30 +461,6 @@ void domain_cpu_policy_changed(struct domain *d)
>      }
>  }
>  
> -#if !defined(CONFIG_BIGMEM) && defined(CONFIG_PDX_COMPRESSION)
> -/*
> - * The hole may be at or above the 44-bit boundary, so we need to determine
> - * the total bit count until reaching 32 significant (not squashed out) bits
> - * in PFN representations.
> - * Note that the way "bits" gets initialized/updated/bounds-checked guarantees
> - * that the function will never return zero, and hence will never be called
> - * more than once (which is important due to it being deliberately placed in
> - * .init.text).
> - */
> -static unsigned int __init noinline _domain_struct_bits(void)
> -{
> -    unsigned int bits = 32 + PAGE_SHIFT;
> -    unsigned int sig = hweight32(~pfn_hole_mask);
> -    unsigned int mask = pfn_hole_mask >> 32;
> -
> -    for ( ; bits < BITS_PER_LONG && sig < 32; ++bits, mask >>= 1 )
> -        if ( !(mask & 1) )
> -            ++sig;
> -
> -    return bits;
> -}
> -#endif
> -

I'm very happy to see this disappear.  Both because of a non-__init
function calling an __init function, and because this internal logic is
just horrible.
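
(Spelling out the first point, as I read it: alloc_domain_struct() is
plain .text while _domain_struct_bits() lives in .init.text, which gets
freed after boot, so the call is only safe because "bits" can never end
up zero once set.  A stripped-down sketch of the pattern, with the
hypothetical compute_bits() standing in for _domain_struct_bits():

    static unsigned int __init compute_bits(void); /* .init.text, freed after boot */

    struct domain *alloc_domain_struct(void)       /* plain .text, runs at any time */
    {
        static unsigned int __read_mostly bits;

        /*
         * Only reachable while bits == 0; if that could ever recur after
         * boot, we'd be calling into already-freed .init.text.
         */
        if ( unlikely(!bits) )
            bits = compute_bits();

        /* ... rest of the allocation ... */
    }

Anything relying on "it can never return zero" to stay safe is asking
for trouble.)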

>  struct domain *alloc_domain_struct(void)
>  {
>      struct domain *d;
> @@ -498,14 +474,15 @@ struct domain *alloc_domain_struct(void)
>       * On systems with CONFIG_BIGMEM there's no packing, and so there's no
>       * such restriction.
>       */
> -#if defined(CONFIG_BIGMEM) || !defined(CONFIG_PDX_COMPRESSION)
> -    const unsigned int bits = IS_ENABLED(CONFIG_BIGMEM) ? 0 :
> -                                                          32 + PAGE_SHIFT;
> +#if defined(CONFIG_BIGMEM)
> +    const unsigned int bits = 0;
>  #else
> -    static unsigned int __read_mostly bits;
> +    static unsigned int __ro_after_init bits;
>  
>      if ( unlikely(!bits) )
> -         bits = _domain_struct_bits();
> +         bits = flsl(pfn_to_paddr(pdx_to_pfn(
> +             1UL << (sizeof(((struct page_info *)NULL)->v.inuse._domain) * 8))))
> +             - 1;

I think this would benefit greatly from not being a one-liner.  There's
sizeof_field(), which helps a little.

But, isn't this UB with CONFIG_BIGMEM?  You're shifting 1UL by 64.

When __pdx_t is unsigned long, no restriction on bits is necessary.
Therefore, don't you want !bits && sizeof_field(...) < BYTES_PER_LONG as
the entry criterion?
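
i.e. something along these lines (rough, untested sketch; assuming
sizeof_field() takes (type, member), everything else taken from the hunk
above):

    static unsigned int __ro_after_init bits;

    /*
     * With a wide (unsigned long) __pdx_t there's no packing restriction,
     * so leave bits at 0 and never evaluate the (then undefined) shift.
     */
    if ( !bits &&
         sizeof_field(struct page_info, v.inuse._domain) < BYTES_PER_LONG )
    {
        unsigned int shift =
            sizeof_field(struct page_info, v.inuse._domain) * 8;

        bits = flsl(pfn_to_paddr(pdx_to_pfn(1UL << shift))) - 1;
    }

That would let the #ifdef go away entirely, and the shift-by-64 can
never be evaluated.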

~Andrew
