Re: [Xen-devel] [PATCH 5/6] x86/hvm: Break out __hvm_copy()'s translation logic

2017-06-22 Thread Jan Beulich
>>> On 21.06.17 at 17:12, Andrew Cooper wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3072,6 +3072,80 @@ void hvm_task_switch(
>      hvm_unmap_entry(nptss_desc);
>  }
>  
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **_page,
> +    gfn_t *__gfn, p2m_type_t *_p2mt)

Please avoid leading underscores on parameter names (and even
more so double ones). page_p, gfn_p, and p2mt_p perhaps?
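
For illustration, the declaration with those names would become:

    enum hvm_translation_result hvm_translate_get_page(
        struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
        pagefault_info_t *pfinfo, struct page_info **page_p,
        gfn_t *gfn_p, p2m_type_t *p2mt_p);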

> +{
> +    struct page_info *page;
> +    p2m_type_t p2mt;
> +    gfn_t gfn;
> +
> +    if ( linear )
> +    {
> +        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
> +
> +        if ( gfn_eq(gfn, INVALID_GFN) )
> +        {
> +            if ( pfec & PFEC_page_paged )
> +                return HVMTRANS_gfn_paged_out;
> +
> +            if ( pfec & PFEC_page_shared )
> +                return HVMTRANS_gfn_shared;
> +
> +            if ( pfinfo )
> +            {
> +                pfinfo->linear = addr;
> +                pfinfo->ec = pfec & ~PFEC_implicit;
> +            }
> +
> +            return HVMTRANS_bad_linear_to_gfn;
> +        }
> +    }
> +    else
> +        gfn = _gfn(addr >> PAGE_SHIFT);

As we risk the caller not recognizing that *pfinfo won't be written
to on this "else" path, wouldn't it be a good idea to ASSERT(!pfinfo)
here?
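
Something like this, as a sketch of the suggestion (untested):

    else
    {
        /* pfinfo is only written on the linear path; catch misuse here. */
        ASSERT(!pfinfo);
        gfn = _gfn(addr >> PAGE_SHIFT);
    }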

> +    /*
> +     * No need to do the P2M lookup for internally handled MMIO, benefiting
> +     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> +     * - newer Windows (like Server 2012) for HPET accesses.
> +     */
> +    if ( v == current && is_hvm_vcpu(v)

Isn't the is_hvm_vcpu() a stray leftover from the PVHv1 removal,
and hence doesn't need retaining here?
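
If it is, the check would presumably reduce to something like:

    if ( v == current
         && !nestedhvm_vcpu_in_guestmode(v)
         && hvm_mmio_internal(gfn_x(gfn) << PAGE_SHIFT) )
        return HVMTRANS_bad_gfn_to_mfn;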

Jan




[Xen-devel] [PATCH 5/6] x86/hvm: Break out __hvm_copy()'s translation logic

2017-06-21 Thread Andrew Cooper
It will be reused by later changes.

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
---
 xen/arch/x86/hvm/hvm.c            | 141 ++++++++++++++++++++++-----------
 xen/include/asm-x86/hvm/support.h |  12 ++++
 2 files changed, 95 insertions(+), 58 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c822d3b..0a5697a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3072,6 +3072,80 @@ void hvm_task_switch(
     hvm_unmap_entry(nptss_desc);
 }
 
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **_page,
+    gfn_t *__gfn, p2m_type_t *_p2mt)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    gfn_t gfn;
+
+    if ( linear )
+    {
+        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
+
+        if ( gfn_eq(gfn, INVALID_GFN) )
+        {
+            if ( pfec & PFEC_page_paged )
+                return HVMTRANS_gfn_paged_out;
+
+            if ( pfec & PFEC_page_shared )
+                return HVMTRANS_gfn_shared;
+
+            if ( pfinfo )
+            {
+                pfinfo->linear = addr;
+                pfinfo->ec = pfec & ~PFEC_implicit;
+            }
+
+            return HVMTRANS_bad_linear_to_gfn;
+        }
+    }
+    else
+        gfn = _gfn(addr >> PAGE_SHIFT);
+
+    /*
+     * No need to do the P2M lookup for internally handled MMIO, benefiting
+     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+     * - newer Windows (like Server 2012) for HPET accesses.
+     */
+    if ( v == current && is_hvm_vcpu(v)
+         && !nestedhvm_vcpu_in_guestmode(v)
+         && hvm_mmio_internal(gfn_x(gfn) << PAGE_SHIFT) )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+
+    if ( !page )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(page);
+        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+        return HVMTRANS_gfn_paged_out;
+    }
+    if ( p2m_is_shared(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_gfn_shared;
+    }
+    if ( p2m_is_grant(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_unhandleable;
+    }
+
+    *_page = page;
+    if ( __gfn )
+        *__gfn = gfn;
+    if ( _p2mt )
+        *_p2mt = p2mt;
+
+    return HVMTRANS_okay;
+}
+
 #define HVMCOPY_from_guest (0u<<0)
 #define HVMCOPY_to_guest   (1u<<0)
 #define HVMCOPY_phys       (0u<<2)
@@ -3080,7 +3154,7 @@ static enum hvm_translation_result __hvm_copy(
     void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
     uint32_t pfec, pagefault_info_t *pfinfo)
 {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
@@ -3106,65 +3180,15 @@ static enum hvm_translation_result __hvm_copy(

     while ( todo > 0 )
     {
+        enum hvm_translation_result res;
         paddr_t gpa = addr & ~PAGE_MASK;

         count = min_t(int, PAGE_SIZE - gpa, todo);

-        if ( flags & HVMCOPY_linear )
-        {
-            gfn = paging_gva_to_gfn(v, addr, &pfec);
-            if ( gfn == gfn_x(INVALID_GFN) )
-            {
-                if ( pfec & PFEC_page_paged )
-                    return HVMTRANS_gfn_paged_out;
-                if ( pfec & PFEC_page_shared )
-                    return HVMTRANS_gfn_shared;
-                if ( pfinfo )
-                {
-                    pfinfo->linear = addr;
-                    pfinfo->ec = pfec & ~PFEC_implicit;
-                }
-                return HVMTRANS_bad_linear_to_gfn;
-            }
-            gpa |= (paddr_t)gfn << PAGE_SHIFT;
-        }
-        else
-        {
-            gfn = addr >> PAGE_SHIFT;
-            gpa = addr;
-        }
-
-        /*
-         * No need to do the P2M lookup for internally handled MMIO, benefiting
-         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
-         * - newer Windows (like Server 2012) for HPET accesses.
-         */
-        if ( v == current && is_hvm_vcpu(v)
-             && !nestedhvm_vcpu_in_guestmode(v)
-             && hvm_mmio_internal(gpa) )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
-
-        if ( !page )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        if ( p2m_is_paging(p2mt) )
-        {
-            put_page(page);
-            p2m_mem_paging_populate(v->domain, gfn);
-            return HVMTRANS_gfn_paged_out;
-        }
-        if ( p2m_is_shared(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_gfn_shared;
-        }
-        if ( p2m_is_grant(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_unhandleable;
-        }
+        res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
+                                     pfec, pfinfo, &page, &gfn, &p2mt);
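
For reference, a minimal caller of the new helper might look as follows. This
is only a sketch of the intended calling pattern, not code from the series;
curr, addr and pfec are assumed to be in scope:

    pagefault_info_t pfinfo;
    struct page_info *page;
    p2m_type_t p2mt;
    gfn_t gfn;
    enum hvm_translation_result res;

    /* Translate a linear address and take a reference on the backing page. */
    res = hvm_translate_get_page(curr, addr, true, pfec, &pfinfo,
                                 &page, &gfn, &p2mt);
    if ( res != HVMTRANS_okay )
        return res;

    /* ... map and access the page as needed ... */

    put_page(page); /* Drop the reference taken by the helper. */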