Signed-off-by: Andrew Cooper
---
CC: Jan Beulich
Name definitely open to improvement. Perhaps better considered in the context
of the following patch.
---
xen/arch/x86/hvm/dom0_build.c | 2 +-
xen/arch/x86/hvm/emulate.c| 40 ++--
xen/arch/x86/hvm/hvm.c| 56 +++
xen/arch/x86/hvm/intercept.c | 20 +++---
xen/arch/x86/hvm/svm/nestedsvm.c | 5 ++--
xen/arch/x86/hvm/svm/svm.c| 2 +-
xen/arch/x86/hvm/viridian.c | 2 +-
xen/arch/x86/hvm/vmsi.c | 2 +-
xen/arch/x86/hvm/vmx/realmode.c | 2 +-
xen/arch/x86/hvm/vmx/vvmx.c | 14 +-
xen/arch/x86/mm/shadow/common.c | 12 -
xen/common/libelf/libelf-loader.c | 4 +--
xen/include/asm-x86/hvm/support.h | 40 ++--
13 files changed, 101 insertions(+), 100 deletions(-)
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 020c355..e8f746c 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -238,7 +238,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct
domain *d)
if ( !pvh_steal_ram(d, HVM_VM86_TSS_SIZE, 128, GB(4), &gaddr) )
{
if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
- HVMCOPY_okay )
+ HVMTRANS_okay )
printk("Unable to zero VM86 TSS area\n");
d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) | gaddr;
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 495e312..384ad0b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -100,7 +100,7 @@ static int ioreq_server_read(const struct hvm_io_handler
*io_handler,
uint32_t size,
uint64_t *data)
{
-if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
+if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
return X86EMUL_UNHANDLEABLE;
return X86EMUL_OKAY;
@@ -892,18 +892,18 @@ static int __hvmemul_read(
switch ( rc )
{
-case HVMCOPY_okay:
+case HVMTRANS_okay:
break;
-case HVMCOPY_bad_gva_to_gfn:
+case HVMTRANS_bad_linear_to_gfn:
x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
-case HVMCOPY_bad_gfn_to_mfn:
+case HVMTRANS_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
hvmemul_ctxt, 0);
-case HVMCOPY_gfn_paged_out:
-case HVMCOPY_gfn_shared:
+case HVMTRANS_gfn_paged_out:
+case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
default:
return X86EMUL_UNHANDLEABLE;
@@ -1011,15 +1011,15 @@ static int hvmemul_write(
switch ( rc )
{
-case HVMCOPY_okay:
+case HVMTRANS_okay:
break;
-case HVMCOPY_bad_gva_to_gfn:
+case HVMTRANS_bad_linear_to_gfn:
x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
-case HVMCOPY_bad_gfn_to_mfn:
+case HVMTRANS_bad_gfn_to_mfn:
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
hvmemul_ctxt, 0);
-case HVMCOPY_gfn_paged_out:
-case HVMCOPY_gfn_shared:
+case HVMTRANS_gfn_paged_out:
+case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
default:
return X86EMUL_UNHANDLEABLE;
@@ -1383,7 +1383,7 @@ static int hvmemul_rep_movs(
return rc;
}
-rc = HVMCOPY_okay;
+rc = HVMTRANS_okay;
}
else
/*
@@ -1393,16 +1393,16 @@ static int hvmemul_rep_movs(
*/
rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
-if ( rc == HVMCOPY_okay )
+if ( rc == HVMTRANS_okay )
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
xfree(buf);
-if ( rc == HVMCOPY_gfn_paged_out )
+if ( rc == HVMTRANS_gfn_paged_out )
return X86EMUL_RETRY;
-if ( rc == HVMCOPY_gfn_shared )
+if ( rc == HVMTRANS_gfn_shared )
return X86EMUL_RETRY;
-if ( rc != HVMCOPY_okay )
+if ( rc != HVMTRANS_okay )
{
gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
@@ -1512,10 +1512,10 @@ static int hvmemul_rep_stos(
switch ( rc )
{
-case HVMCOPY_gfn_paged_out:
-case HVMCOPY_gfn_shared:
+case HVMTRANS_gfn_paged_out:
+case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
-case HVMCOPY_okay:
+case HVMTRANS_okay:
return X86EMUL_OKAY;
}
@@ -2171,7 +2171,7 @@ void hvm_emulate_init_per_insn(
) &&