Hi Julien,
On 2024-02-13 18:14, Julien Grall wrote:
Hi Jan,
On 13/02/2024 07:13, Jan Beulich wrote:
On 12.02.2024 19:38, Julien Grall wrote:
An alternative would be to introduce arch_grant_cache_flush() and move the if/else logic there. Something like:
diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index 69f817d1e68a..4a3de49762a1 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -281,6 +281,19 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
     dsb(sy);
 }
 
+static inline arch_grant_cache_flush(unsigned int op, const void *p, unsigned long size)
+{
+    unsigned int order = get_order_from_bytes(size);
+
+    if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
+        clean_and_invalidate_dcache_va_range(v, cflush->length);
+    else if ( cflush->op & GNTTAB_CACHE_INVAL )
+        invalidate_dcache_va_range(v, cflush->length);
+    else if ( cflush->op & GNTTAB_CACHE_CLEAN )
+        clean_dcache_va_range(v, cflush->length);
+
+    return 0;
+}
+
 /* Flush the dcache for an entire page. */
 void flush_page_to_ram(unsigned long mfn, bool sync_icache);
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 424744ad5e1a..647e1522466d 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -735,8 +735,7 @@ void asmlinkage __init start_xen(unsigned long boot_phys_offset,
                   fdt_paddr);
 
     /* Register Xen's load address as a boot module. */
-    xen_bootmodule = add_boot_module(BOOTMOD_XEN,
-                                     virt_to_maddr(_start),
+    xen_bootmodule = add_boot_module(BOOTMOD_XEN, virt_to_maddr(_start),
                                      (paddr_t)(uintptr_t)(_end - _start),
                                      false);
     BUG_ON(!xen_bootmodule);
diff --git a/xen/arch/x86/include/asm/flushtlb.h b/xen/arch/x86/include/asm/flushtlb.h
index bb0ad58db49b..dfe51cddde90 100644
--- a/xen/arch/x86/include/asm/flushtlb.h
+++ b/xen/arch/x86/include/asm/flushtlb.h
@@ -182,23 +182,22 @@ void flush_area_mask(const cpumask_t *mask, const void *va,
 }
 
 static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
-static inline int invalidate_dcache_va_range(const void *p,
-                                             unsigned long size)
-{ return -EOPNOTSUPP; }
-static inline int clean_and_invalidate_dcache_va_range(const void *p,
-                                                       unsigned long size)
+
+unsigned int guest_flush_tlb_flags(const struct domain *d);
+void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
+
+static inline arch_grant_cache_flush(unsigned int op, const void *p, unsigned long size)
 {
-    unsigned int order = get_order_from_bytes(size);
+    unsigned int order;
+
+    if ( !(cflush->op & GNTTAB_CACHE_CLEAN) )
+        return -EOPNOTSUPP;
+
+    order = get_order_from_bytes(size);
     /* sub-page granularity support needs to be added if necessary */
     flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+
     return 0;
 }
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
-{
-    return clean_and_invalidate_dcache_va_range(p, size);
-}
-
-unsigned int guest_flush_tlb_flags(const struct domain *d);
-void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
 
 #endif /* __FLUSHTLB_H__ */
I have a slight preference for the latter. I would like to hear the
opinion of the others.
I would prefer this 2nd form, too, assuming the setup.c change wasn't
really meant to be there.
Indeed. It was left over from a previous change, and I forgot to remove it.
The one thing that doesn't become clear: in the sketch above, arch_grant_cache_flush() has no return type, yet has "return 0". This raises a question related to the one at the root of this thread: do you mean the function to have a return value, and if so, will it be (sensibly) used?
Sorry, I should have double-checked the code before sending it. arch_grant_cache_flush() should return a value, so that each arch can decide whether it handles a given operation.
Cheers,
I do like the idea. I applied some of the suggestions to the attached proof-of-concept patch. It is still not compile-tested, since the CI seems a bit slow today.
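
The net effect in common code is that _cache_flush() now simply propagates whatever the per-arch helper returns, so each architecture can report an unsupported operation. As a minimal sketch of the resulting call site (this mirrors the grant_table.c hunk at the end of the patch):

    ret = arch_grant_cache_flush(cflush->op, v, cflush->length);
    /*
     * On Arm this is always 0; on x86 it can be -EOPNOTSUPP (e.g. for an
     * invalidate-only request), and the error is propagated to the caller.
     */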
--
Nicola Vetrini, BSc
Software Engineer, BUGSENG srl (https://bugseng.com)
diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index 69f817d1e68a..5f9357632164 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -159,13 +159,13 @@ static inline size_t read_dcache_line_bytes(void)
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
 
-static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+static inline void invalidate_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
@@ -188,17 +188,15 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
 
     dsb(sy); /* So we know the flushes happen before continuing */
-
-    return 0;
 }
 
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
+static inline void clean_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
@@ -211,18 +209,16 @@ static inline int clean_dcache_va_range(const void *p, unsigned long size)
           idx += dcache_line_bytes, size -= dcache_line_bytes )
         asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
     dsb(sy); /* So we know the flushes happen before continuing */
-    /* ARM callers assume that dcache_* functions cannot fail. */
-    return 0;
 }
 
-static inline int clean_and_invalidate_dcache_va_range
+static inline void clean_and_invalidate_dcache_va_range
     (const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
@@ -235,8 +231,6 @@ static inline int clean_and_invalidate_dcache_va_range
           idx += dcache_line_bytes, size -= dcache_line_bytes )
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
     dsb(sy); /* So we know the flushes happen before continuing */
-    /* ARM callers assume that dcache_* functions cannot fail. */
-    return 0;
 }
/* Macros for flushing a single small item. The predicate is always
@@ -266,6 +260,20 @@ static inline int clean_and_invalidate_dcache_va_range
: : "r" (_p), "m" (*_p)); \
} while (0)
+static inline int arch_grant_cache_flush(unsigned int op, const void *p,
+                                         unsigned long size)
+{
+    if ( (op & GNTTAB_CACHE_INVAL) && (op & GNTTAB_CACHE_CLEAN) )
+        clean_and_invalidate_dcache_va_range(p, size);
+    else if ( op & GNTTAB_CACHE_INVAL )
+        invalidate_dcache_va_range(p, size);
+    else if ( op & GNTTAB_CACHE_CLEAN )
+        clean_dcache_va_range(p, size);
+
+    /* ARM callers assume that dcache_* functions cannot fail. */
+    return 0;
+}
+
/*
* Write a pagetable entry.
*
diff --git a/xen/arch/x86/include/asm/flushtlb.h b/xen/arch/x86/include/asm/flushtlb.h
index bb0ad58db49b..7c71fe377757 100644
--- a/xen/arch/x86/include/asm/flushtlb.h
+++ b/xen/arch/x86/include/asm/flushtlb.h
@@ -182,21 +182,21 @@ void flush_area_mask(const cpumask_t *mask, const void *va,
}
static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
-static inline int invalidate_dcache_va_range(const void *p,
-                                             unsigned long size)
-{ return -EOPNOTSUPP; }
-static inline int clean_and_invalidate_dcache_va_range(const void *p,
-                                                       unsigned long size)
+
+static inline int arch_grant_cache_flush(unsigned int op, const void *p,
+                                         unsigned long size)
 {
-    unsigned int order = get_order_from_bytes(size);
+    unsigned int order;
+
+    if ( !(op & GNTTAB_CACHE_CLEAN) )
+        return -EOPNOTSUPP;
+
+    order = get_order_from_bytes(size);
     /* sub-page granularity support needs to be added if necessary */
     flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+
     return 0;
 }
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
-{
-    return clean_and_invalidate_dcache_va_range(p, size);
-}
unsigned int guest_flush_tlb_flags(const struct domain *d);
void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 5721eab22561..8615ea144bb3 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3572,14 +3572,7 @@ static int _cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref
     v = map_domain_page(mfn);
     v += cflush->offset;
 
-    if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
-        ret = clean_and_invalidate_dcache_va_range(v, cflush->length);
-    else if ( cflush->op & GNTTAB_CACHE_INVAL )
-        ret = invalidate_dcache_va_range(v, cflush->length);
-    else if ( cflush->op & GNTTAB_CACHE_CLEAN )
-        ret = clean_dcache_va_range(v, cflush->length);
-    else
-        ret = 0;
+    ret = arch_grant_cache_flush(cflush->op, v, cflush->length);
 
     if ( d != owner )
     {