On 09/05/2025 4:57 pm, Oleksii Kurochko wrote:
> diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
> index 28f57a74f2..8b46210768 100644
> --- a/xen/arch/riscv/include/asm/p2m.h
> +++ b/xen/arch/riscv/include/asm/p2m.h
> @@ -3,11 +3,73 @@
>  #define ASM__RISCV__P2M_H
>  
>  #include <xen/errno.h>
> +#include <xen/mem_access.h>
> +#include <xen/mm.h>
> +#include <xen/radix-tree.h>
> +#include <xen/rwlock.h>
> +#include <xen/types.h>

We're phasing out the explicit inclusion of xen/types.h in complex headers
like this one, as it's already pulled in by almost all of the other
dependencies.
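
In other words, the

    #include <xen/types.h>

line can simply be dropped here; per the above, the other xen/*.h includes
already pull it in transitively.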

> diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
> new file mode 100644
> index 0000000000..ad4beef8f9
> --- /dev/null
> +++ b/xen/arch/riscv/p2m.c
> @@ -0,0 +1,168 @@
> +#include <xen/domain_page.h>
> +#include <xen/iommu.h>
> +#include <xen/lib.h>
> +#include <xen/mm.h>
> +#include <xen/pfn.h>
> +#include <xen/rwlock.h>
> +#include <xen/sched.h>
> +#include <xen/spinlock.h>
> +
> +#include <asm/page.h>
> +#include <asm/p2m.h>
> +
> +/*
> + * Force a synchronous P2M TLB flush.
> + *
> + * Must be called with the p2m lock held.
> + *
> + * TODO: add support for flushing the TLB associated with a VMID.
> + */
> +static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
> +{
> +    ASSERT(p2m_is_write_locked(p2m));
> +
> +    /*
> +     * TODO: shouldn't this flush be done for each physical CPU?
> +     *       If so, the SBI call sbi_remote_hfence_gvma() could
> +     *       be used for that.
> +     */
> +#if defined(__riscv_hh) || defined(__riscv_h)
> +    asm volatile ( "hfence.gvma" ::: "memory" );
> +#else
> +    asm volatile ( ".insn r 0x73, 0x0, 0x31, x0, x0, x0" ::: "memory" );
> +#endif

TLB flushing needs to happen on each pCPU which has potentially cached a
mapping for this domain.

In other arches, this is tracked by d->dirty_cpumask, the bitmap of pCPUs
on which this domain is currently scheduled.

CPUs flush their TLBs before removing themselves from d->dirty_cpumask,
typically during context switch, which means that flushing the P2M only
requires IPI'ing that subset of CPUs rather than all of them.
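
As a rough sketch of what I mean (note: the sbi_remote_hfence_gvma()
signature and the p2m->domain back-pointer are my assumptions here, not an
existing API), something along these lines:

    /*
     * Flush only those pCPUs which may still hold stale guest-physical
     * mappings for this domain, as tracked by d->dirty_cpumask.
     */
    static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
    {
        struct domain *d = p2m->domain;    /* assumed back-pointer */

        ASSERT(p2m_is_write_locked(p2m));

        /* A 0/0 GPA range meaning "flush everything" is assumed as well. */
        sbi_remote_hfence_gvma(d->dirty_cpumask, 0, 0);

        p2m->need_flush = false;
    }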


> +
> +    p2m->need_flush = false;
> +}
> +
> +static void p2m_tlb_flush_sync(struct p2m_domain *p2m)
> +{
> +    if ( p2m->need_flush )
> +        p2m_force_tlb_flush_sync(p2m);
> +}
> +
> +/* Unlock the P2M and do a P2M TLB flush if necessary */
> +void p2m_write_unlock(struct p2m_domain *p2m)
> +{
> +    /*
> +     * The final flush is done with the P2M write lock taken to avoid
> +     * someone else modifying the P2M before the TLB invalidation has
> +     * completed.
> +     */
> +    p2m_tlb_flush_sync(p2m);
> +
> +    write_unlock(&p2m->lock);
> +}
> +
> +static void clear_and_clean_page(struct page_info *page)
> +{
> +    void *p = __map_domain_page(page);
> +
> +    clear_page(p);
> +    unmap_domain_page(p);
> +}
> +
> +static struct page_info *p2m_get_clean_page(struct domain *d)
> +{
> +    struct page_info *page;
> +
> +    /*
> +     * As explained in the Privileged Architecture Spec (version 20240411),
> +     * Section 18.5.1, for the paged virtual-memory schemes
> +     * (Sv32x4, Sv39x4, Sv48x4, and Sv57x4), the root page table is 16 KiB
> +     * and must be aligned to a 16-KiB boundary.
> +     */
> +    page = alloc_domheap_pages(NULL, 2, 0);
> +    if ( page == NULL )
> +        return NULL;
> +
> +    clear_and_clean_page(page);

You appear to have allocated 4 pages, but only zeroed one.
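
A minimal fix, assuming the order-2 allocation stays, would be to clear and
clean every page of the block rather than just the first, e.g. (with an
"unsigned int i;" local added):

    /* An order-2 allocation covers 1 << 2 pages; zero and clean them all. */
    for ( i = 0; i < (1U << 2); i++ )
        clear_and_clean_page(page + i);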

~Andrew
