As with the serialise side, Xen's copy_from_guest API is used, with a
compatibility wrapper for the userspace build.
Signed-off-by: Andrew Cooper
Signed-off-by: Sergey Dyasli
Signed-off-by: Roger Pau Monné
---
CC: Jan Beulich
CC: Wei Liu
CC: Roger Pau Monné
CC: Sergey Dyasli
CC: Ian Jackson
v2:
* Rewrite copy_from_buffer_offset() to avoid multiple evaluation of its
arguments.
* Expand boundary justifications.
---
 xen/common/libx86/cpuid.c      | 94 ++++++++++++++++++++++++++++++++++++++
 xen/common/libx86/private.h    | 14 +++
 xen/include/xen/libx86/cpuid.h | 11 +
 3 files changed, 119 insertions(+)
diff --git a/xen/common/libx86/cpuid.c b/xen/common/libx86/cpuid.c
index cf7dbd3..73cd574 100644
--- a/xen/common/libx86/cpuid.c
+++ b/xen/common/libx86/cpuid.c
@@ -123,6 +123,100 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
return 0;
}
+/*
+ * Deserialise a cpuid_policy from an array of (leaf, subleaf, a..d) tuples.
+ *
+ * Unknown leaves or out-of-range subleaves yield -ERANGE, reporting the
+ * offending indices via err_leaf/err_subleaf (when non-NULL).  Returns
+ * -E2BIG for oversized input and -EFAULT on a failed buffer copy.
+ */
+int x86_cpuid_copy_from_buffer(struct cpuid_policy *p,
+                               const cpuid_leaf_buffer_t leaves,
+                               uint32_t nr_leaves, uint32_t *err_leaf,
+                               uint32_t *err_subleaf)
+{
+    unsigned int i;
+    xen_cpuid_leaf_t data;
+    /* Alias the a..d payload of 'data' as the internal struct cpuid_leaf. */
+    struct cpuid_leaf *l = (void *)&data;
+
+    /*
+     * A well formed caller is expected to pass an array with leaves in order,
+     * and without any repetitions.  However, due to per-vendor differences,
+     * and in the case of upgrade or levelled scenarios, we typically expect
+     * fewer than MAX leaves to be passed.
+     *
+     * Detecting repeated entries is prohibitively complicated, so we don't
+     * bother.  That said, one way or another if more than MAX leaves are
+     * passed, something is wrong.
+     */
+    if ( nr_leaves > CPUID_MAX_SERIALISED_LEAVES )
+        return -E2BIG;
+
+    for ( i = 0; i < nr_leaves; ++i )
+    {
+        if ( copy_from_buffer_offset(&data, leaves, i, 1) )
+            return -EFAULT;
+
+        switch ( data.leaf )
+        {
+        case 0 ... ARRAY_SIZE(p->basic.raw) - 1:
+            /* Multi-subleaf leaves are stored in their own arrays. */
+            switch ( data.leaf )
+            {
+            case 0x4:
+                if ( data.subleaf >= ARRAY_SIZE(p->cache.raw) )
+                    goto out_of_range;
+
+                p->cache.raw[data.subleaf] = *l;
+                break;
+
+            case 0x7:
+                if ( data.subleaf >= ARRAY_SIZE(p->feat.raw) )
+                    goto out_of_range;
+
+                p->feat.raw[data.subleaf] = *l;
+                break;
+
+            case 0xb:
+                if ( data.subleaf >= ARRAY_SIZE(p->topo.raw) )
+                    goto out_of_range;
+
+                p->topo.raw[data.subleaf] = *l;
+                break;
+
+            case 0xd:
+                if ( data.subleaf >= ARRAY_SIZE(p->xstate.raw) )
+                    goto out_of_range;
+
+                /* Index by subleaf, matching the bounds check above. */
+                p->xstate.raw[data.subleaf] = *l;
+                break;
+
+            default:
+                p->basic.raw[data.leaf] = *l;
+                break;
+            }
+            break;
+
+        case 0x40000000:
+            p->hv_limit = l->a;
+            break;
+
+        case 0x40000100:
+            p->hv2_limit = l->a;
+            break;
+
+        case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
+            p->extd.raw[data.leaf & 0xffff] = *l;
+            break;
+
+        default:
+            goto out_of_range;
+        }
+    }
+
+    return 0;
+
+ out_of_range:
+    if ( err_leaf )
+        *err_leaf = data.leaf;
+    if ( err_subleaf )
+        *err_subleaf = data.subleaf;
+
+    return -ERANGE;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/common/libx86/private.h b/xen/common/libx86/private.h
index e874fb6..dc451d0 100644
--- a/xen/common/libx86/private.h
+++ b/xen/common/libx86/private.h
@@ -12,6 +12,7 @@
 #include <xen/guest_access.h>
#define copy_to_buffer_offset copy_to_guest_offset
+#define copy_from_buffer_offset copy_from_guest_offset
#else
@@ -44,6 +45,19 @@ static inline bool test_bit(unsigned int bit, const void *vaddr)
0; \
})
+/* memcpy(), but with copy_from_guest_offset()'s API. */
+#define copy_from_buffer_offset(dst, src, index, nr)            \
+({                                                              \
+    /* Evaluate each argument exactly once. */                  \
+    const typeof(*(dst)) *src_ = (src);                         \
+    typeof(*(dst)) *dst_ = (dst);                               \
+    typeof(index) index_ = (index);                             \
+    typeof(nr) nr_ = (nr), i_;                                  \
+                                                                \
+    for ( i_ = 0; i_ < nr_; i_++ )                              \
+        dst_[i_] = src_[index_ + i_];                           \
+    /* Always succeeds, like copy_from_guest_offset(). */       \
+    0;                                                          \
+})
+
#endif /* __XEN__ */
#endif /* XEN_LIBX86_PRIVATE_H */
diff --git a/xen/include/xen/libx86/cpuid.h b/xen/include/xen/libx86/cpuid.h
index 460b102..c4c4bd4 100644
--- a/xen/include/xen/libx86/cpuid.h
+++ b/xen/include/xen/libx86/cpuid.h
@@ -260,6 +260,17 @@