From: Richard Henderson
Depending on the currently selected size of the SVE vector registers,
we can either store the data within the "standard" allocation, or we
may need to allocate additional space with an EXTRA record.
Signed-off-by: Richard Henderson
Message-id: 20180303143823.27055-6-richard.hender...@linaro.org
Reviewed-by: Peter Maydell
Signed-off-by: Peter Maydell
---
linux-user/signal.c | 210 +++-
1 file changed, 192 insertions(+), 18 deletions(-)
diff --git a/linux-user/signal.c b/linux-user/signal.c
index f8bc0aa397..2ce5d7a3c7 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -1455,6 +1455,34 @@ struct target_extra_context {
uint32_t reserved[3];
};
+#define TARGET_SVE_MAGIC0x53564501
+
+struct target_sve_context {
+struct target_aarch64_ctx head;
+uint16_t vl;
+uint16_t reserved[3];
+/* The actual SVE data immediately follows. It is layed out
+ * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
+ * the original struct pointer.
+ */
+};
+
+#define TARGET_SVE_VQ_BYTES 16
+
+#define TARGET_SVE_SIG_ZREG_SIZE(VQ) ((VQ) * TARGET_SVE_VQ_BYTES)
+#define TARGET_SVE_SIG_PREG_SIZE(VQ) ((VQ) * (TARGET_SVE_VQ_BYTES / 8))
+
+#define TARGET_SVE_SIG_REGS_OFFSET \
+QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
+(TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
+#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
+(TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
+#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
+(TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
+#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
+(TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -1529,6 +1557,34 @@ static void target_setup_end_record(struct
target_aarch64_ctx *end)
__put_user(0, >size);
}
+static void target_setup_sve_record(struct target_sve_context *sve,
+CPUARMState *env, int vq, int size)
+{
+int i, j;
+
+__put_user(TARGET_SVE_MAGIC, >head.magic);
+__put_user(size, >head.size);
+__put_user(vq * TARGET_SVE_VQ_BYTES, >vl);
+
+/* Note that SVE regs are stored as a byte stream, with each byte element
+ * at a subsequent address. This corresponds to a little-endian store
+ * of our 64-bit hunks.
+ */
+for (i = 0; i < 32; ++i) {
+uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
+for (j = 0; j < vq * 2; ++j) {
+__put_user_e(env->vfp.zregs[i].d[j], z + j, le);
+}
+}
+for (i = 0; i <= 16; ++i) {
+uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
+for (j = 0; j < vq; ++j) {
+uint64_t r = env->vfp.pregs[i].p[j >> 2];
+__put_user_e(r >> ((j & 3) * 16), p + j, le);
+}
+}
+}
+
static void target_restore_general_frame(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -1572,14 +1628,45 @@ static void target_restore_fpsimd_record(CPUARMState
*env,
}
}
+static void target_restore_sve_record(CPUARMState *env,
+ struct target_sve_context *sve, int vq)
+{
+int i, j;
+
+/* Note that SVE regs are stored as a byte stream, with each byte element
+ * at a subsequent address. This corresponds to a little-endian load
+ * of our 64-bit hunks.
+ */
+for (i = 0; i < 32; ++i) {
+uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
+for (j = 0; j < vq * 2; ++j) {
+__get_user_e(env->vfp.zregs[i].d[j], z + j, le);
+}
+}
+for (i = 0; i <= 16; ++i) {
+uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
+for (j = 0; j < vq; ++j) {
+uint16_t r;
+__get_user_e(r, p + j, le);
+if (j & 3) {
+env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
+} else {
+env->vfp.pregs[i].p[j >> 2] = r;
+}
+}
+}
+}
+
static int target_restore_sigframe(CPUARMState *env,
struct target_rt_sigframe *sf)
{
struct target_aarch64_ctx *ctx, *extra = NULL;
struct target_fpsimd_context *fpsimd = NULL;
+struct target_sve_context *sve = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
bool err = false;
+int vq = 0, sve_size = 0;
target_restore_general_frame(env, sf);
@@ -1611,6 +1698,18 @@ static int target_restore_sigframe(CPUARMState *env,
fpsimd = (struct target_fpsimd_context *)ctx;
break;
+case TARGET_SVE_MAGIC:
+if (arm_feature(env,