movzx is a move with zero-extend. This means it will move either 1 or 2
bytes and zero the rest of the register. Which part of the register gets
zeroed depends on the operand size override prefix:

  - If OP SZ is not set, always zero the whole register, independent of rex_w.
    This means all bits can be zeroed if the destination is eax or rax. No need
    to set the preserve mask.

  - OP SZ is only set if ax is used. This is the only remaining case.

    The preserve mask then depends on the width of the access. In case of B,
    zero bits 8-15, and preserve 16-63. In case of W, zero nothing, but
    preserve 16-63.

Once this pattern is clear, the fix in the hypervisor is straightforward.

Amend existing and add new test cases that test correct behaviour.

Signed-off-by: Ralf Ramsauer <[email protected]>
---

I'm starting to get frustrated with x86. I thought I had caught all
relevant cases, but x86 provides enough complexity for a bunch of corner
cases...

 hypervisor/arch/x86/mmio.c         | 11 ++++-
 inmates/tests/x86/mmio-access-32.c | 24 +++++++----
 inmates/tests/x86/mmio-access.c    | 67 ++++++++++++++++++++++++------
 3 files changed, 80 insertions(+), 22 deletions(-)

diff --git a/hypervisor/arch/x86/mmio.c b/hypervisor/arch/x86/mmio.c
index 124f9966..76d70053 100644
--- a/hypervisor/arch/x86/mmio.c
+++ b/hypervisor/arch/x86/mmio.c
@@ -55,6 +55,7 @@ struct parse_context {
        bool has_rex_r;
        bool has_addrsz_prefix;
        bool has_opsz_prefix;
+       bool zero_extend;
 };
 
 static bool ctx_update(struct parse_context *ctx, u64 *pc, unsigned int 
advance,
@@ -144,6 +145,7 @@ restart:
                ctx.has_opsz_prefix = true;
                goto restart;
        case X86_OP_MOVZX_OPC1:
+               ctx.zero_extend = true;
                if (!ctx_update(&ctx, &pc, 1, pg_structs))
                        goto error_noinst;
                op[1].raw = *ctx.inst;
@@ -191,8 +193,13 @@ restart:
 
        op[2].raw = *ctx.inst;
 
-       if (!ctx.does_write && inst.access_size < 4)
-               inst.reg_preserve_mask = ~BYTE_MASK(inst.access_size);
+       if (!ctx.does_write) {
+               if (!ctx.zero_extend && inst.access_size < 4)
+                       inst.reg_preserve_mask = ~BYTE_MASK(inst.access_size);
+               else if (ctx.zero_extend && ctx.has_opsz_prefix)
+                       inst.reg_preserve_mask =
+                               ~BYTE_MASK(inst.access_size ^ 0x3);
+       }
 
        /* ensure that we are actually talking about mov imm,<mem> */
        if (op[0].raw == X86_OP_MOV_IMMEDIATE_TO_MEM && op[2].modrm.reg != 0)
diff --git a/inmates/tests/x86/mmio-access-32.c 
b/inmates/tests/x86/mmio-access-32.c
index db743410..756d3947 100644
--- a/inmates/tests/x86/mmio-access-32.c
+++ b/inmates/tests/x86/mmio-access-32.c
@@ -64,15 +64,23 @@ void inmate_main(void)
        EXPECT_EQUAL(reg32,
                     ((unsigned long)mmio_reg & ~0xffUL) | (pattern & 0xff));
 
-       /* MOVZXB (0f b6), 32-bit data, 32-bit address */
-       asm volatile("movzxb (%%ebx), %%eax"
-               : "=a" (reg32) : "a" (0), "b" (mmio_reg));
-       EXPECT_EQUAL(reg32, (u8)pattern);
+       /* MOVZXB (0f b6), 8-bit data, 32-bit address, zero extend bits 8-31 */
+       asm volatile("movzxb (%%eax), %%eax"
+               : "=a" (reg32) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg32, pattern & 0xff);
 
-       /* MOVZXW (0f b7) */
-       asm volatile("movzxw (%%ebx), %%eax"
-               : "=a" (reg32) : "a" (0), "b" (mmio_reg));
-       EXPECT_EQUAL(reg32, (u16)pattern);
+       /* MOVZXB (66 0f b6), 8-bit data, 32-bit address, zero extend bits 8-15,
+        * operand size prefix */
+       asm volatile("movzxb (%%eax), %%ax"
+               : "=a" (reg32) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg32,
+                    ((unsigned long)mmio_reg & ~0xffff) | (pattern & 0xff));
+
+       /* MOVZXW (0f b7), 16-bit data, 32-bit address, zero extend
+        * bits 16-31 */
+       asm volatile("movzxw (%%eax), %%eax"
+               : "=a" (reg32) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg32, pattern & 0xffff);
 
        /* MEM_TO_AX (a1), 32-bit data, 32-bit address */
        asm volatile("mov (0x101ff8), %%eax"
diff --git a/inmates/tests/x86/mmio-access.c b/inmates/tests/x86/mmio-access.c
index a17455b0..18eab3a5 100644
--- a/inmates/tests/x86/mmio-access.c
+++ b/inmates/tests/x86/mmio-access.c
@@ -84,20 +84,63 @@ void inmate_main(void)
        EXPECT_EQUAL(reg64,
                     ((unsigned long)mmio_reg & ~0xffUL) | (pattern & 0xff));
 
-       /* MOVZXB (0f b6), to 64-bit, mod=0, reg=0, rm=3 */
-       asm volatile("movzxb (%%rbx), %%rax"
-               : "=a" (reg64) : "a" (0), "b" (mmio_reg));
-       EXPECT_EQUAL(reg64, (u8)pattern);
+       /* MOVZXB (48 0f b6), to 64-bit, mod=0, reg=0, rm=3 */
+       asm volatile("movzxb (%%rax), %%rax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xff);
 
-       /* MOVZXB (0f b6), 32-bit data, 32-bit address */
-       asm volatile("movzxb (%%ebx), %%eax"
-               : "=a" (reg64) : "a" (0), "b" (mmio_reg));
-       EXPECT_EQUAL(reg64, (u8)pattern);
+       /* MOVZXB (0f b6), to 32-bit, clear bits 8-63 */
+       asm volatile("movzxb (%%rax), %%eax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xff);
 
-       /* MOVZXW (0f b7) */
-       asm volatile("movzxw (%%rbx), %%rax"
-               : "=a" (reg64) : "a" (0), "b" (mmio_reg));
-       EXPECT_EQUAL(reg64, (u16)pattern);
+       /* MOVZXB (66 0f b6), to 32-bit, clear bits 8-15, keep bits 16-63,
+        * operand size prefix */
+       asm volatile("movzxb (%%rax), %%ax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64,
+                    ((unsigned long)mmio_reg & ~0xffffUL) | (pattern & 0xff));
+
+       /* MOVZXB (67 48 0f b6), 8-bit data, clear bits 8-63, 32-bit address,
+        * AD SZ override prefix */
+       asm volatile("movzxb (%%eax), %%rax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xff);
+
+       /* MOVZXB (67 0f b6), 8-bit data, clear bits 8-63, 32-bit address,
+        * AD SZ override prefix */
+       asm volatile("movzxb (%%eax), %%eax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xff);
+
+       /* MOVZXB (67 66 0f b6), 8-bit data, clear bits 8-15, keep bits 16-63,
+        * 32-bit address, AD SZ override prefix, OP SZ override prefix */
+       asm volatile("movzxb (%%eax), %%ax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64,
+                    ((unsigned long)mmio_reg & ~0xffffUL) | (pattern & 0xff));
+
+       /* MOVZXW (48 0f b7), 16-bit data, clear bits 16-63, 64-bit address */
+       asm volatile("movzxw (%%rax), %%rax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xffff);
+
+       /* MOVZXW (0f b7), 16-bit data, clear bits 16-63, 64-bit address */
+       asm volatile("movzxw (%%rax), %%eax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xffff);
+
+       /* MOVZXW (67 48 0f b7), 16-bit data, clear bits 16-63, 32-bit address,
+        * AD SZ prefix */
+       asm volatile("movzxw (%%eax), %%rax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xffff);
+
+       /* MOVZXW (67 0f b7), 16-bit data, clear bits 16-63, 32-bit address,
+        * AD SZ prefix */
+       asm volatile("movzxw (%%eax), %%eax"
+               : "=a" (reg64) : "a" (mmio_reg));
+       EXPECT_EQUAL(reg64, pattern & 0xffff);
 
        /* MEM_TO_AX (a1), 64-bit data, 64-bit address */
        asm volatile("movabs (0x101ff8), %%rax"
-- 
2.22.0

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/jailhouse-dev/20190612190035.16171-1-ralf.ramsauer%40oth-regensburg.de.
For more options, visit https://groups.google.com/d/optout.

Reply via email to