On Thu, 28 Jul 2016, Julien Grall wrote:
> Sometimes the invalidation of the TLBs can be deferred until the p2m is
> unlocked. This is, for instance, the case when multiple mappings are
> removed. In other cases, such as shattering a superpage, an immediate
> flush is required.
>
> Keep track of whether a flush is needed directly in the p2m_domain
> structure to allow serializing multiple changes. The TLBs will be
> invalidated, if necessary, when the p2m write lock is released.
>
> Also, a new helper, p2m_flush_tlb_sync, has been introduced to force a
> synchronous TLB invalidation.
>
> Finally, replace the call to p2m_flush_tlb with p2m_flush_tlb_sync in
> apply_p2m_changes.
>
> Note this patch is not useful on its own; however, follow-up patches
> will take advantage of it.
>
> Signed-off-by: Julien Grall
Reviewed-by: Stefano Stabellini
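
Just to spell out my understanding of the intended usage for the
archives: a caller batching several changes would look roughly like the
sketch below. This is only an illustration; remove_mapping_at() is a
made-up helper, not something introduced by this patch.

    p2m_write_lock(p2m);

    for ( i = 0; i < nr; i++ )
    {
        /* Each removal only marks a TLB flush as pending... */
        remove_mapping_at(p2m, gfn_add(sgfn, i));
        p2m->need_flush = true;
    }

    /* ...and p2m_write_unlock() issues a single TLB invalidation. */
    p2m_write_unlock(p2m);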
> ---
>  xen/arch/arm/p2m.c        | 33 ++++++++++++++++++++++++++++++++-
>  xen/include/asm-arm/p2m.h | 11 +++++++++++
>  2 files changed, 43 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 6b29cf0..a6dce0c 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -52,8 +52,21 @@ static inline void p2m_write_lock(struct p2m_domain *p2m)
>      write_lock(&p2m->lock);
>  }
>  
> +static void p2m_flush_tlb(struct p2m_domain *p2m);
> +
>  static inline void p2m_write_unlock(struct p2m_domain *p2m)
>  {
> +    if ( p2m->need_flush )
> +    {
> +        p2m->need_flush = false;
> +        /*
> +         * The final flush is done with the P2M write lock taken to
> +         * avoid someone else modifying the P2M before the TLB
> +         * invalidation has completed.
> +         */
> +        p2m_flush_tlb(p2m);
> +    }
> +
>      write_unlock(&p2m->lock);
>  }
>  
> @@ -72,6 +85,11 @@ static inline int p2m_is_locked(struct p2m_domain *p2m)
>      return rw_is_locked(&p2m->lock);
>  }
>  
> +static inline int p2m_is_write_locked(struct p2m_domain *p2m)
> +{
> +    return rw_is_write_locked(&p2m->lock);
> +}
> +
>  void p2m_dump_info(struct domain *d)
>  {
>      struct p2m_domain *p2m = &d->arch.p2m;
> @@ -165,6 +183,19 @@ static void p2m_flush_tlb(struct p2m_domain *p2m)
>  }
>  
>  /*
> + * Force a synchronous P2M TLB flush.
> + *
> + * Must be called with the p2m lock held.
> + */
> +static void p2m_flush_tlb_sync(struct p2m_domain *p2m)
> +{
> +    ASSERT(p2m_is_write_locked(p2m));
> +
> +    p2m_flush_tlb(p2m);
> +    p2m->need_flush = false;
> +}
> +
> +/*
>   * Lookup the MFN corresponding to a domain's GFN.
>   *
>   * There are no processor functions to do a stage 2 only lookup therefore we
> @@ -1142,7 +1173,7 @@ static int apply_p2m_changes(struct domain *d,
>  out:
>      if ( flush )
>      {
> -        p2m_flush_tlb(&d->arch.p2m);
> +        p2m_flush_tlb_sync(&d->arch.p2m);
>          ret = iommu_iotlb_flush(d, gfn_x(sgfn), nr);
>          if ( !rc )
>              rc = ret;
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 03bfd5e..e6be3ea 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -51,6 +51,17 @@ struct p2m_domain {
>      /* Indicate if it is required to clean the cache when writing an entry */
>      bool_t clean_pte;
>  
> +    /*
> +     * P2M updates may require TLBs to be flushed (invalidated).
> +     *
> +     * Flushes may be deferred by setting 'need_flush' and then flushing
> +     * when the p2m write lock is released.
> +     *
> +     * If an immediate flush is required (e.g., if a superpage is
> +     * shattered), call p2m_flush_tlb_sync().
> +     */
> +    bool need_flush;
> +
>      /* Gather some statistics for information purposes only */
>      struct {
>          /* Number of mappings at each p2m tree level */
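
For the immediate-flush case mentioned here, I'd expect a shattering
path in a follow-up to look roughly like the sketch below;
split_superpage() is hypothetical, not part of this series.

    ASSERT(p2m_is_write_locked(p2m));

    /* Replace the superpage entry with a table of smaller mappings. */
    split_superpage(p2m, entry);

    /*
     * Invalidate the old superpage translation before the new table can
     * be used or the p2m modified further.
     */
    p2m_flush_tlb_sync(p2m);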
> --
> 1.9.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel