On 15.04.2025 17:32, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -370,7 +370,12 @@ static int hvmemul_do_io(
>          /* If there is no suitable backing DM, just ignore accesses */
>          if ( !s )
>          {
> -            if ( is_mmio && is_hardware_domain(currd) )
> +            if ( is_mmio && is_hardware_domain(currd) &&
> +                 /*
> +                  * Do not attempt to fixup accesses to r/o MMIO regions, they
> +                  * are expected to be terminated by the null handler below.
> +                  */
> +                 !rangeset_contains_singleton(mmio_ro_ranges, PFN_DOWN(addr)) )
>              {
>                  /*
>                   * PVH dom0 is likely missing MMIO mappings on the p2m, due 
> to

Doesn't this need limiting to writes, i.e. permitting reads to still be
handled right here?
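
Something along these lines perhaps (untested, and assuming the access
direction is available as "dir" at this point in hvmemul_do_io()):

            if ( is_mmio && is_hardware_domain(currd) &&
                 (dir != IOREQ_WRITE ||
                  !rangeset_contains_singleton(mmio_ro_ranges,
                                               PFN_DOWN(addr))) )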

> --- /dev/null
> +++ b/xen/arch/x86/hvm/mmio.c
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * MMIO related routines.
> + *
> + * Copyright (c) 2025 Cloud Software Group
> + */
> +
> +#include <xen/io.h>
> +#include <xen/mm.h>
> +
> +#include <asm/p2m.h>
> +
> +static int cf_check subpage_mmio_accept(struct vcpu *v, unsigned long addr)
> +{
> +    p2m_type_t t;
> +    mfn_t mfn = get_gfn_query_unlocked(v->domain, addr, &t);

Don't you need to use PFN_DOWN() on addr?
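
I.e. presumably

    mfn_t mfn = get_gfn_query_unlocked(v->domain, PFN_DOWN(addr), &t);

given that the lookup wants a frame number, not a byte address.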

> +    return !mfn_eq(mfn, INVALID_MFN) && t == p2m_mmio_direct &&
> +           subpage_mmio_find_page(mfn);
> +}
> +
> +static int cf_check subpage_mmio_read(
> +    struct vcpu *v, unsigned long addr, unsigned int len, unsigned long *data)
> +{
> +    struct domain *d = v->domain;
> +    p2m_type_t t;
> +    mfn_t mfn = get_gfn_query(d, addr, &t);

Same here and further down, and in the write case?
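
I.e. (sketch only) the lookup as well as the later put_gfn() calls would
all want the frame number:

    mfn_t mfn = get_gfn_query(d, PFN_DOWN(addr), &t);
    ...
    put_gfn(d, PFN_DOWN(addr));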

> +    struct subpage_ro_range *entry;
> +    volatile void __iomem *mem;
> +
> +    *data = ~0UL;
> +
> +    if ( mfn_eq(mfn, INVALID_MFN) || t != p2m_mmio_direct )
> +    {
> +        put_gfn(d, addr);
> +        return X86EMUL_RETRY;
> +    }
> +
> +    entry = subpage_mmio_find_page(mfn);
> +    if ( !entry )
> +    {
> +        put_gfn(d, addr);
> +        return X86EMUL_OKAY;
> +    }
> +
> +    mem = subpage_mmio_map_page(entry);
> +    if ( !mem )
> +    {
> +        put_gfn(d, addr);
> +        gprintk(XENLOG_ERR,
> +                "Failed to map page for MMIO read at %#lx -> %#lx\n",
> +                addr, mfn_to_maddr(mfn) + PAGE_OFFSET(addr));
> +        return X86EMUL_OKAY;
> +    }
> +
> +    *data = read_mmio(mem + PAGE_OFFSET(addr), len);

What if this crosses the trailing page boundary? Imo subpage_mmio_accept()
would better reject misaligned accesses (at least until we know we need to
handle such for some obscure reason).
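
As a sketch of what I mean (placement is illustrative only: the accept hook
doesn't see the access length, so the check may instead need to live in the
read/write handlers):

    /*
     * Reject accesses misaligned for their size; for power-of-2 sizes up
     * to 8 bytes this also rules out crossing into the next page.
     */
    if ( !IS_ALIGNED(addr, len) )
        return X86EMUL_UNHANDLEABLE;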

> +    put_gfn(d, addr);
> +    return X86EMUL_OKAY;
> +}

Thinking of it - why do reads need handling here? The guest has read access?
Hmm, yes, read-modify-write operations may take this path. Maybe worth
having a comment here.
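
Perhaps along the lines of (wording only a suggestion):

    /*
     * Reads are handled here despite the guest having read access to the
     * page: read-modify-write instructions emulated because of the
     * write protection also take the read path through this handler.
     */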

> +void register_subpage_ro_handler(struct domain *d)
> +{
> +    static const struct hvm_mmio_ops subpage_mmio_ops = {
> +        .check = subpage_mmio_accept,
> +        .read = subpage_mmio_read,
> +        .write = subpage_mmio_write,
> +    };
> +
> +    register_mmio_handler(d, &subpage_mmio_ops);

Are there any criteria by which we could limit the registration of this
handler?
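
E.g. (purely illustrative; subpage_ro_active() is a hypothetical helper
querying whether any sub-page r/o ranges were set up at all):

    /* Hypothetical helper: skip registration when there's nothing to guard. */
    if ( !subpage_ro_active() )
        return;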

Jan
