On 09.02.2026 13:30, Julian Vetter wrote:
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -71,6 +71,38 @@ static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
>      return INVALID_GFN;
>  }
>  
> +static gfn_t hvm_alloc_ioreq_gfns(struct ioreq_server *s,
> +                                  unsigned int nr_pages)
> +{
> +    struct domain *d = s->target;
> +    unsigned long mask = d->arch.hvm.ioreq_gfn.mask;
> +    unsigned int i, run;
> +
> +    /* Find nr_pages consecutive set bits */
> +    for ( i = 0, run = 0; i < BITS_PER_LONG; i++ )
> +    {
> +        if ( test_bit(i, &mask) )
> +        {
> +            if ( ++run == nr_pages )
> +            {
> +                /* Found a run - clear all bits and return base GFN */
> +                unsigned int start = i - nr_pages + 1;
> +                for ( unsigned int j = start; j <= i; j++ )
> +                    clear_bit(j, &d->arch.hvm.ioreq_gfn.mask);
> +                return _gfn(d->arch.hvm.ioreq_gfn.base + start);
> +            }

We may want to gain a bitmap library function for this. Sadly
bitmap_find_free_region() is too special-purpose for the needs here.
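Just to sketch the rough shape such a library function might take
(name and exact interface purely illustrative, untested):

/*
 * Find the first run of @nr consecutive set bits in the first @size
 * bits of @bitmap; returns @size when no such run exists.
 */
unsigned int bitmap_find_set_region(const unsigned long *bitmap,
                                    unsigned int size, unsigned int nr)
{
    unsigned int i, run = 0;

    for ( i = 0; i < size; i++ )
    {
        if ( !test_bit(i, bitmap) )
            run = 0;
        else if ( ++run == nr )
            return i + 1 - nr;
    }

    return size;
}
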
Otherwise I think ...

> +        }
> +        else
> +            run = 0;

... the construct as a whole would benefit from re-working into
if/else-if, as that'll reduce indentation by one level for the main
block of code.

Also, nit: Blank line please between declaration(s) and statement(s).
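
I.e., with that nit also taken care of, something along the lines of
(untested):

        if ( !test_bit(i, &mask) )
            run = 0;
        else if ( ++run == nr_pages )
        {
            /* Found a run - clear all bits and return base GFN */
            unsigned int start = i - nr_pages + 1, j;

            for ( j = start; j <= i; j++ )
                clear_bit(j, &d->arch.hvm.ioreq_gfn.mask);

            return _gfn(d->arch.hvm.ioreq_gfn.base + start);
        }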

> @@ -121,52 +153,95 @@ static void hvm_free_ioreq_gfn(struct ioreq_server *s, gfn_t gfn)
>      }
>  }
>  
> +static void hvm_free_ioreq_gfns(struct ioreq_server *s, gfn_t gfn,
> +                                unsigned int nr_pages)
> +{
> +    unsigned int i;
> +
> +    for ( i = 0; i < nr_pages; i++ )
> +        hvm_free_ioreq_gfn(s, _gfn(gfn_x(gfn) + i));
> +}
> +
>  static void hvm_unmap_ioreq_gfn(struct ioreq_server *s, bool buf)
>  {
> -    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    unsigned int i, nr_pages = buf ? 1 : NR_IOREQ_PAGES;
>  
> -    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> -        return;
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];
> +
> +        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> +            continue;
>  
> -    destroy_ring_for_helper(&iorp->va, iorp->page);
> -    iorp->page = NULL;
> +        destroy_ring_for_helper(&iorp->va, iorp->page);
> +        iorp->page = NULL;
>  
> -    hvm_free_ioreq_gfn(s, iorp->gfn);
> -    iorp->gfn = INVALID_GFN;
> +        hvm_free_ioreq_gfn(s, iorp->gfn);
> +        iorp->gfn = INVALID_GFN;
> +    }
>  }
>  
>  static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool buf)
>  {
>      struct domain *d = s->target;
> -    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    unsigned int i, nr_pages = buf ? 1 : NR_IOREQ_PAGES;
> +    gfn_t base_gfn;
>      int rc;
>  
> -    if ( iorp->page )
> +    /* Check if already mapped */
> +    for ( i = 0; i < nr_pages; i++ )
>      {
> -        /*
> -         * If a page has already been allocated (which will happen on
> -         * demand if ioreq_server_get_frame() is called), then
> -         * mapping a guest frame is not permitted.
> -         */
> -        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> -            return -EPERM;
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];
>  
> -        return 0;
> +        if ( iorp->page )
> +        {
> +            /*
> +             * If a page has already been allocated (which will happen on
> +             * demand if ioreq_server_get_frame() is called), then
> +             * mapping a guest frame is not permitted.
> +             */
> +            if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> +                return -EPERM;
> +
> +            return 0;
> +        }

How can you simply return here when you found one page already mapped?
(This will likely solve itself when the data structure is changed; see
remark at the very bottom.)
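
Purely to illustrate the concern (untested, and leaving aside what a
partially mapped state would then mean): 0 would want returning only
once all pages were found to be mapped, e.g.

    for ( i = 0; i < nr_pages; i++ )
    {
        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];

        if ( !iorp->page )
            break;

        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
            return -EPERM;
    }

    if ( i == nr_pages )
        return 0;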

> @@ -174,32 +249,43 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool buf)
>  static void hvm_remove_ioreq_gfn(struct ioreq_server *s, bool buf)
>  {
>      struct domain *d = s->target;
> -    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    unsigned int i, nr_pages = buf ? 1 : NR_IOREQ_PAGES;
>  
> -    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> -        return;
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];
> +
> +        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> +            continue;
>  
> -    if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) )
> -        domain_crash(d);
> -    clear_page(iorp->va);
> +        if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) )
> +            domain_crash(d);
> +        clear_page(iorp->va);
> +    }
>  }
>  
>  static int hvm_add_ioreq_gfn(struct ioreq_server *s, bool buf)
>  {
>      struct domain *d = s->target;
> -    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    unsigned int i, nr_pages = buf ? 1 : NR_IOREQ_PAGES;
>      int rc;
>  
> -    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> -        return 0;
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];
>  
> -    clear_page(iorp->va);
> +        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
> +            continue;
>  
> -    rc = p2m_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0, p2m_ram_rw);
> -    if ( rc == 0 )
> -        paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));
> +        clear_page(iorp->va);
>  
> -    return rc;
> +        rc = p2m_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0, p2m_ram_rw);
> +        if ( rc )
> +            return rc;

The lack of any rolling back of what was successfully done before will
want explaining in a comment.
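
Or, if rolling back is what's wanted, something along these lines in
place of the bare "return rc" (merely a sketch, untested):

        if ( rc )
        {
            while ( i-- )
            {
                iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];

                if ( !gfn_eq(iorp->gfn, INVALID_GFN) &&
                     p2m_remove_page(d, iorp->gfn,
                                     page_to_mfn(iorp->page), 0) )
                    domain_crash(d);
            }

            return rc;
        }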

> @@ -260,84 +263,120 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
>  
>  static int ioreq_server_alloc_mfn(struct ioreq_server *s, bool buf)
>  {
> -    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
>      struct page_info *page;
> +    unsigned int i, j, nr_pages = buf ? 1 : NR_IOREQ_PAGES;
>  
> -    if ( iorp->page )
> +    for ( i = 0; i < nr_pages; i++ )
>      {
> -        /*
> -         * If a guest frame has already been mapped (which may happen
> -         * on demand if ioreq_server_get_info() is called), then
> -         * allocating a page is not permitted.
> -         */
> -        if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
> -            return -EPERM;
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[i];
>  
> -        return 0;
> -    }
> +        if ( iorp->page )
> +        {
> +            /*
> +             * If a guest frame has already been mapped (which may happen
> +             * on demand if ioreq_server_get_info() is called), then
> +             * allocating a page is not permitted.
> +             */
> +            if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
> +                return -EPERM;
> +            continue;  /* Already allocated */
> +        }
>  
> -    page = alloc_domheap_page(s->target, MEMF_no_refcount);
> +        page = alloc_domheap_page(s->target, MEMF_no_refcount);
> +        if ( !page )
> +            goto fail;
>  
> -    if ( !page )
> -        return -ENOMEM;
> +        if ( !get_page_and_type(page, s->target, PGT_writable_page) )
> +        {
> +            /*
> +             * The domain can't possibly know about this page yet, so failure
> +             * here is a clear indication of something fishy going on.
> +             */
> +            put_page_alloc_ref(page);
> +            domain_crash(s->emulator);
> +            return -ENODATA;
> +        }
>  
> -    if ( !get_page_and_type(page, s->target, PGT_writable_page) )
> -    {
> -        /*
> -         * The domain can't possibly know about this page yet, so failure
> -         * here is a clear indication of something fishy going on.
> -         */
> -        domain_crash(s->emulator);
> -        return -ENODATA;
> -    }
> +        /* Assign early so cleanup can find it */
> +        iorp->page = page;
>  
> -    iorp->va = __map_domain_page_global(page);
> -    if ( !iorp->va )
> -        goto fail;
> +        iorp->va = __map_domain_page_global(page);
> +        if ( !iorp->va )
> +            goto fail;
> +
> +        clear_page(iorp->va);
> +    }
>  
> -    iorp->page = page;
> -    clear_page(iorp->va);
>      return 0;
>  
> - fail:
> -    put_page_alloc_ref(page);
> -    put_page_and_type(page);
> +fail:
> +    /* Free all previously allocated pages */
> +    for ( j = 0; j <= i; j++ )
> +    {
> +        struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreqs.page[j];
> +        if ( iorp->page )
> +        {
> +            if ( iorp->va )
> +                  unmap_domain_page_global(iorp->va);

Nit: Indentation.

> +            iorp->va = NULL;

Perhaps best to introduce and use UNMAP_DOMAIN_PAGE_GLOBAL(), paralleling
UNMAP_DOMAIN_PAGE().
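
I.e. presumably, next to the existing UNMAP_DOMAIN_PAGE():

#define UNMAP_DOMAIN_PAGE_GLOBAL(p) do { \
    unmap_domain_page_global(p);         \
    (p) = NULL;                          \
} while ( false )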

> +            put_page_alloc_ref(iorp->page);
> +            put_page_and_type(iorp->page);
> +            iorp->page = NULL;

Maybe also PUT_PAGE_AND_TYPE().
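
... which would follow the same pattern:

#define PUT_PAGE_AND_TYPE(pg) do { \
    put_page_and_type(pg);         \
    (pg) = NULL;                   \
} while ( false )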

> @@ -29,6 +36,10 @@ struct ioreq_page {
>      void *va;
>  };
>  
> +struct ioreq_pages {
> +    struct ioreq_page page[NR_IOREQ_PAGES];
> +};
> +
>  struct ioreq_vcpu {
>      struct list_head list_entry;
>      struct vcpu      *vcpu;
> @@ -45,7 +56,7 @@ struct ioreq_server {
>      /* Lock to serialize toolstack modifications */
>      spinlock_t             lock;
>  
> -    struct ioreq_page      ioreq;
> +    struct ioreq_pages     ioreqs;
>      struct list_head       ioreq_vcpu_list;
>      struct ioreq_page      bufioreq;

You allocate contiguous GFNs and you will, as per what Andrew requested, also
allocate contiguous VA space. No need to inflate the structure like this then?
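
I.e. with contiguity guaranteed, a single base GFN and VA would seem to
suffice, e.g. (purely illustrative):

struct ioreq_pages {
    gfn_t gfn;                              /* base of contiguous GFN range */
    void *va;                               /* base of contiguous mapping */
    struct page_info *page[NR_IOREQ_PAGES]; /* backing pages */
};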

Having reached the bottom - how's qemu going to know that multiple pages are
in use?

Jan
