Re: [Xen-devel] [RFC PATCH 1/9] x86/HVM: Introduce struct hvm_pi_ops

2016-10-12 Thread Konrad Rzeszutek Wilk
On Mon, Sep 19, 2016 at 12:52:40AM -0500, Suravee Suthikulpanit wrote:
> The current function pointers for managing hvm posted interrupt
> can be used also by SVM AVIC. Therefore, this patch introduces the
> struct hvm_pi_ops in the struct hvm_domain to hold them.
> 
> Signed-off-by: Suravee Suthikulpanit 

You seemed to have forgotten to CC the VMX maintainers:

INTEL(R) VT FOR X86 (VT-X)  
   
M:  Jun Nakajima
   
M:  Kevin Tian
   
S:  Supported   
?

Regardless of that, Reviewed-by: Konrad Rzeszutek Wilk 
> ---
>  xen/arch/x86/hvm/vmx/vmx.c | 32 +--
>  xen/include/asm-x86/hvm/domain.h   | 63 
> ++
>  xen/include/asm-x86/hvm/hvm.h  |  4 +--
>  xen/include/asm-x86/hvm/vmx/vmcs.h | 59 ---
>  4 files changed, 81 insertions(+), 77 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2759e6f..8620697 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
>  if ( !iommu_intpost || !has_hvm_container_domain(d) )
>  return;
>  
> -ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
> +ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
>  
> -d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
> -d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
> -d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
> -d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
> +d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
> +d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
> +d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
> +d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
>  }
>  
>  /* This function is called when pcidevs_lock is held */
> @@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
>  if ( !iommu_intpost || !has_hvm_container_domain(d) )
>  return;
>  
> -ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
> +ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
>  
> -d->arch.hvm_domain.vmx.vcpu_block = NULL;
> -d->arch.hvm_domain.vmx.pi_switch_from = NULL;
> -d->arch.hvm_domain.vmx.pi_switch_to = NULL;
> -d->arch.hvm_domain.vmx.pi_do_resume = NULL;
> +d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
> +d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
> +d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
> +d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
>  }
>  
>  static int vmx_domain_initialise(struct domain *d)
> @@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
>  vmx_restore_host_msrs();
>  vmx_save_dr(v);
>  
> -if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
> -v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
> +if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
> +v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
>  }
>  
>  static void vmx_ctxt_switch_to(struct vcpu *v)
> @@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
>  vmx_restore_guest_msrs(v);
>  vmx_restore_dr(v);
>  
> -if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
> -v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
> +if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
> +v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
>  }
>  
>  
> @@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs 
> *regs)
>  struct hvm_vcpu_asid *p_asid;
>  bool_t need_flush;
>  
> -if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
> -curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
> +if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
> +curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
>  
>  if ( !cpu_has_vmx_vpid )
>  goto out;
> diff --git a/xen/include/asm-x86/hvm/domain.h 
> b/xen/include/asm-x86/hvm/domain.h
> index f34d784..779927b 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -72,6 +72,67 @@ struct hvm_ioreq_server {
>  bool_t bufioreq_atomic;
>  };
>  
> +struct hvm_pi_ops {
> +/*
> + * To handle posted interrupts correctly, we need to set the following
> + * state:
> + *
> + * * The PI notification vector (NV)
> + * * The PI notification destination processor (NDST)
> + * * The PI "suppress notification" bit (SN)
> + * * The vcpu pi "blocked" list
> + *
> + * If a VM is currently running, we want the PI delivered to the guest 
> vcpu
> + * on the proper pcpu (NDST = v->processor, SN clear).
> + *
> + * If the vm is blocked, we want the PI delivered to Xen so that it can
> + * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).

[Xen-devel] [RFC PATCH 1/9] x86/HVM: Introduce struct hvm_pi_ops

2016-09-18 Thread Suravee Suthikulpanit
The current function pointers for managing hvm posted interrupt
can be used also by SVM AVIC. Therefore, this patch introduces the
struct hvm_pi_ops in the struct hvm_domain to hold them.

Signed-off-by: Suravee Suthikulpanit 
---
 xen/arch/x86/hvm/vmx/vmx.c | 32 +--
 xen/include/asm-x86/hvm/domain.h   | 63 ++
 xen/include/asm-x86/hvm/hvm.h  |  4 +--
 xen/include/asm-x86/hvm/vmx/vmcs.h | 59 ---
 4 files changed, 81 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2759e6f..8620697 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
 if ( !iommu_intpost || !has_hvm_container_domain(d) )
 return;
 
-ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
 
-d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
-d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
-d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
-d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
+d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
+d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
+d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
 }
 
 /* This function is called when pcidevs_lock is held */
@@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
 if ( !iommu_intpost || !has_hvm_container_domain(d) )
 return;
 
-ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
+ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
 
-d->arch.hvm_domain.vmx.vcpu_block = NULL;
-d->arch.hvm_domain.vmx.pi_switch_from = NULL;
-d->arch.hvm_domain.vmx.pi_switch_to = NULL;
-d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
+d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
+d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
+d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
 }
 
 static int vmx_domain_initialise(struct domain *d)
@@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
 vmx_restore_host_msrs();
 vmx_save_dr(v);
 
-if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
-v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
+if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
 vmx_restore_guest_msrs(v);
 vmx_restore_dr(v);
 
-if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
-v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
+if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
 }
 
 
@@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
 struct hvm_vcpu_asid *p_asid;
 bool_t need_flush;
 
-if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
-curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
+if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
 
 if ( !cpu_has_vmx_vpid )
 goto out;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..779927b 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -72,6 +72,67 @@ struct hvm_ioreq_server {
 bool_t bufioreq_atomic;
 };
 
+struct hvm_pi_ops {
+/*
+ * To handle posted interrupts correctly, we need to set the following
+ * state:
+ *
+ * * The PI notification vector (NV)
+ * * The PI notification destination processor (NDST)
+ * * The PI "suppress notification" bit (SN)
+ * * The vcpu pi "blocked" list
+ *
+ * If a VM is currently running, we want the PI delivered to the guest vcpu
+ * on the proper pcpu (NDST = v->processor, SN clear).
+ *
+ * If the vm is blocked, we want the PI delivered to Xen so that it can
+ * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
+ *
+ * If the VM is currently either preempted or offline (i.e., not running
+ * because of some reason other than blocking waiting for an interrupt),
+ * there's nothing Xen can do -- we want the interrupt pending bit set in
+ * the guest, but we don't want to bother Xen with an interrupt (SN clear).
+ *
+ * There's a brief window of time between vmx_intr_assist() and checking
+ * softirqs where if an interrupt comes in it may be lost; so we need Xen
+ * to get an interrupt and raise a softirq so that it will go through the
+ * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
+