On 05.06.19 18:39, Ralf Ramsauer wrote:
We trap certain MMIO accesses and need to emulate their access.
On x86, a 32-bit read will clear bits 32-63 of a register.
Inconsistently, on x86, 16-bit and 8-bit reads must not clear high bits.
Jailhouse erroneously cleared those bits. Prevent this by applying a
preserve mask that keeps those upper bits alive.
Add tests that check correct behaviour.
Signed-off-by: Ralf Ramsauer <[email protected]>
---
This patch is not related to "x86: mmio: add support for 0x66 operand
prefix", it's just a bug that I stumbled over while working on a v2 of
the other patch.
The calculation of the preserve mask dynamically creates a bitmask. At
the moment, only 8 bit support would be required, but it already
handles 16 bit accesses as well, as those will be introduced by the
0x66 operand prefix override patch.
hypervisor/arch/x86/include/asm/mmio.h | 5 +++++
hypervisor/arch/x86/mmio.c | 7 ++++++-
hypervisor/arch/x86/vcpu.c | 7 +++++--
inmates/tests/x86/mmio-access-32.c | 10 +++++++---
inmates/tests/x86/mmio-access.c | 10 +++++++---
5 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/hypervisor/arch/x86/include/asm/mmio.h
b/hypervisor/arch/x86/include/asm/mmio.h
index 756c84a8..e6c4e4dc 100644
--- a/hypervisor/arch/x86/include/asm/mmio.h
+++ b/hypervisor/arch/x86/include/asm/mmio.h
@@ -30,6 +30,11 @@ struct mmio_instruction {
/** Output value, already copied either from a register or
* from an immediate value */
unsigned long out_val;
+ /** A read must not clear the upper bits of registers, if the access
+ * width is smaller than 32 bit. This mask describes the bits that have
+ * to be preserved.
+ */
+ unsigned long read_preserve_mask;
reg_preserve_mask? Makes it clearer where it applies (not on the MMIO access).
};
/**
diff --git a/hypervisor/arch/x86/mmio.c b/hypervisor/arch/x86/mmio.c
index b234bd79..e8bf32bc 100644
--- a/hypervisor/arch/x86/mmio.c
+++ b/hypervisor/arch/x86/mmio.c
@@ -85,7 +85,8 @@ x86_mmio_parse(const struct guest_paging_structures
*pg_structs, bool is_write)
struct parse_context ctx = { .remaining = X86_MAX_INST_LEN,
.count = 1 };
union registers *guest_regs = &this_cpu_data()->guest_regs;
- struct mmio_instruction inst = { .inst_len = 0 };
+ struct mmio_instruction inst = { .inst_len = 0,
+ .read_preserve_mask = 0, };
I think we can just do "{ 0 }" here which states that the whole structure is
0-initialized.
u64 pc = vcpu_vendor_get_rip();
unsigned int n, skip_len = 0;
bool has_immediate = false;
@@ -168,6 +169,10 @@ restart:
op[2].raw = *ctx.inst;
+ if (!does_write && inst.access_size < 4)
+ inst.read_preserve_mask =
+ ~((1UL << (inst.access_size * 8)) - 1);
BYTE_MASK(inst.access_size)
+
/* ensure that we are actually talking about mov imm,<mem> */
if (op[0].raw == X86_OP_MOV_IMMEDIATE_TO_MEM && op[2].modrm.reg != 0)
goto error_unsupported;
diff --git a/hypervisor/arch/x86/vcpu.c b/hypervisor/arch/x86/vcpu.c
index 5a557d0b..3f6a13f1 100644
--- a/hypervisor/arch/x86/vcpu.c
+++ b/hypervisor/arch/x86/vcpu.c
@@ -231,6 +231,7 @@ bool vcpu_handle_mmio_access(void)
struct mmio_access mmio = {.size = 0};
struct vcpu_mmio_intercept intercept;
struct mmio_instruction inst;
+ unsigned long *value;
Maybe rather "reg"?
vcpu_vendor_get_mmio_intercept(&intercept);
@@ -249,8 +250,10 @@ bool vcpu_handle_mmio_access(void)
result = mmio_handle_access(&mmio);
if (result == MMIO_HANDLED) {
- if (!mmio.is_write)
- guest_regs->by_index[inst.in_reg_num] = mmio.value;
+ if (!mmio.is_write) {
+ value = &guest_regs->by_index[inst.in_reg_num];
+ *value = (*value & inst.read_preserve_mask) |
mmio.value;
+ }
vcpu_skip_emulated_instruction(inst.inst_len);
return true;
}
diff --git a/inmates/tests/x86/mmio-access-32.c
b/inmates/tests/x86/mmio-access-32.c
index 2f47f211..be1d470f 100644
--- a/inmates/tests/x86/mmio-access-32.c
+++ b/inmates/tests/x86/mmio-access-32.c
@@ -47,9 +47,13 @@ void inmate_main(void)
EXPECT_EQUAL(reg32, pattern);
/* MOV_FROM_MEM (8a), 8-bit data */
- asm volatile("movb (%%ebx), %%al"
- : "=a" (reg32) : "a" (0), "b" (mmio_reg));
- EXPECT_EQUAL(reg32, (u8)pattern);
+ asm volatile("movb (%%eax), %%al"
+ : "=a" (reg32) : "a" (mmio_reg));
+ EXPECT_EQUAL((u8)reg32, (u8)pattern);
+ /* %al should contain 0x44, while higher bits still hold the rest of
+ * mmio_reg. Test this. */
+ EXPECT_EQUAL(reg32,
+ ((unsigned long)mmio_reg & ~0xffUL) | (pattern & 0xff));
/* MOVZXB (0f b6), 32-bit data, 32-bit address */
asm volatile("movzxb (%%ebx), %%eax"
diff --git a/inmates/tests/x86/mmio-access.c b/inmates/tests/x86/mmio-access.c
index 0e6a56b1..a9d2fcaf 100644
--- a/inmates/tests/x86/mmio-access.c
+++ b/inmates/tests/x86/mmio-access.c
@@ -67,9 +67,13 @@ void inmate_main(void)
EXPECT_EQUAL(reg64, (u32)pattern);
/* MOV_FROM_MEM (8a), 8-bit data */
- asm volatile("movb (%%rbx), %%al"
- : "=a" (reg64) : "a" (0), "b" (mmio_reg));
- EXPECT_EQUAL(reg64, (u8)pattern);
+ asm volatile("movb (%%rax), %%al"
+ : "=a" (reg64) : "a" (mmio_reg));
+ EXPECT_EQUAL((u8)reg64, (u8)pattern);
+ /* %al should contain 0x88, while high bits should still hold the rest
+ * of mmio_reg */
+ EXPECT_EQUAL(reg64,
+ ((unsigned long)mmio_reg & ~0xffUL) | (pattern & 0xff));
/* MOVZXB (0f b6), to 64-bit, mod=0, reg=0, rm=3 */
asm volatile("movzxb (%%rbx), %%rax"
Thanks for sorting that out - I thought I'd see my plan "let's make this
instruction parser as dumb as imaginable" floating down the gutter...
Jan
--
Siemens AG, Corporate Technology, CT RDA IOT SES-DE
Corporate Competence Center Embedded Linux
--
You received this message because you are subscribed to the Google Groups
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
To view this discussion on the web visit
https://groups.google.com/d/msgid/jailhouse-dev/685fa0fa-6c99-82f0-2e15-5299e9866e16%40siemens.com.
For more options, visit https://groups.google.com/d/optout.