Re: [Xen-devel] [PATCH v3 22/24] x86/hvm: Avoid __hvm_copy() raising #PF behind the emulators back

2016-11-30 Thread Paul Durrant
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.coop...@citrix.com]
> Sent: 30 November 2016 13:51
> To: Xen-devel 
> Cc: Andrew Cooper ; Paul Durrant
> 
> Subject: [PATCH v3 22/24] x86/hvm: Avoid __hvm_copy() raising #PF behind
> the emulators back
> 
> Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require
> callers
> to inject the pagefault themselves.
> 
> Signed-off-by: Andrew Cooper 
> Acked-by: Tim Deegan 
> Acked-by: Kevin Tian 
> Reviewed-by: Jan Beulich 
> ---
> CC: Paul Durrant 

Reviewed-by: Paul Durrant 

[Xen-devel] [PATCH v3 22/24] x86/hvm: Avoid __hvm_copy() raising #PF behind the emulators back

2016-11-30 Thread Andrew Cooper
Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require callers
to inject the pagefault themselves.

Signed-off-by: Andrew Cooper 
Acked-by: Tim Deegan 
Acked-by: Kevin Tian 
Reviewed-by: Jan Beulich 
---
CC: Paul Durrant 

v3:
 * Correct patch description
 * Fix rebasing error over previous TSS series
---
 xen/arch/x86/hvm/emulate.c|  2 ++
 xen/arch/x86/hvm/hvm.c| 14 --
 xen/arch/x86/hvm/vmx/vvmx.c   | 20 +++-
 xen/arch/x86/mm/shadow/common.c   |  1 +
 xen/include/asm-x86/hvm/support.h |  4 +---
 5 files changed, 31 insertions(+), 10 deletions(-)
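
As a self-contained illustration of the contract this patch establishes
(not part of the patch itself): the sketch below mocks
hvm_copy_from_guest_linear() and hvm_inject_page_fault() with stand-in
definitions -- only the pagefault_info_t fields and the caller-side
pattern mirror the real code, whose declarations live in
xen/include/asm-x86/hvm/support.h.  A failed gva->gfn translation now
merely fills in pagefault_info_t; each caller decides whether to inject.

    /* Minimal, compilable model of the new calling convention.     */
    /* The two "hvm_" functions here are mocks, not Xen's versions. */
    #include <stdio.h>
    #include <stdint.h>

    enum hvm_copy_result {
        HVMCOPY_okay,
        HVMCOPY_bad_gva_to_gfn,
        HVMCOPY_bad_gfn_to_mfn,
    };

    typedef struct pagefault_info {
        unsigned long linear;   /* faulting linear address */
        int ec;                 /* #PF error code */
    } pagefault_info_t;

    /* Mock: pretend every translation fails, filling in pfinfo
     * instead of injecting #PF itself (the behavioural change). */
    static enum hvm_copy_result
    hvm_copy_from_guest_linear(void *buf, unsigned long addr, int size,
                               uint32_t pfec, pagefault_info_t *pfinfo)
    {
        (void)buf; (void)size;
        pfinfo->linear = addr;
        pfinfo->ec = (int)pfec;
        return HVMCOPY_bad_gva_to_gfn;
    }

    /* Mock: stands in for Xen's real injection routine. */
    static void hvm_inject_page_fault(int ec, unsigned long linear)
    {
        printf("inject #PF: ec=%#x linear=%#lx\n", (unsigned)ec, linear);
    }

    int main(void)
    {
        char buf[8];
        pagefault_info_t pfinfo;

        /* Caller-side pattern used throughout the hunks below. */
        enum hvm_copy_result rc =
            hvm_copy_from_guest_linear(buf, 0x1000UL, sizeof(buf),
                                       0, &pfinfo);
        if ( rc == HVMCOPY_bad_gva_to_gfn )
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
        if ( rc != HVMCOPY_okay )
            return 1;
        return 0;
    }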

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 035b654..ccf3aa2 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -799,6 +799,7 @@ static int __hvmemul_read(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
@@ -905,6 +906,7 @@ static int hvmemul_write(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
                                          hvmemul_ctxt, 0);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 37eaee2..3596f2c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2927,6 +2927,8 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_linear(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
@@ -2965,11 +2967,15 @@ void hvm_task_switch(
                                   offsetof(typeof(tss), trace) -
                                   offsetof(typeof(tss), eip),
                                   PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
     rc = hvm_copy_from_guest_linear(
         &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     /*
      * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
      * functions knew we want RO access.
@@ -3012,7 +3018,10 @@ void hvm_task_switch(
                                       &tss.back_link, sizeof(tss.back_link), 0,
                                       &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
+        {
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
+        }
         else if ( rc != HVMCOPY_okay )
             goto out;
     }
@@ -3050,7 +3059,10 @@ void hvm_task_switch(
         rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
                                       &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
+        {
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
+        }
         else if ( rc != HVMCOPY_okay )
             goto out;
     }
@@ -3114,8 +3126,6 @@ static enum hvm_copy_result __hvm_copy(
         {
             pfinfo->linear = addr;
             pfinfo->ec = pfec;
-
-            hvm_inject_page_fault(pfec, addr);
         }
         return HVMCOPY_bad_gva_to_gfn;
     }
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index fd7ea0a..e6e9ebd 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -396,7 +396,6 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     struct vcpu *v = current;
     union vmx_inst_info info;
     struct segment_register seg;
-    pagefault_info_t pfinfo;
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
@@ -451,10 +450,17 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
                   offset + size - 1 > seg.limit) )
         goto gp_fault;
 
-    if ( poperandS != NULL &&
-         hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
-              != HVMCOPY_okay )
-        return X86EMUL_EXCEPTION;
+    if ( poperandS != NULL )
+    {
+        pagefault_info_t pfinfo;
+        int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+                                            0, &pfinfo);
+
+        if ( rc == HVMCOPY_bad_gva_to_gfn )
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+        if ( rc != HVMCOPY_okay )
+            return X86EMUL_EXCEPTION;