Re: [Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

2015-01-07 Thread Tian, Kevin
 From: Jan Beulich [mailto:jbeul...@suse.com]
 Sent: Wednesday, January 07, 2015 6:16 PM
 
  On 23.12.14 at 07:52, kevin.t...@intel.com wrote:
   From: Jan Beulich [mailto:jbeul...@suse.com]
  Sent: Friday, December 19, 2014 7:26 PM
 
  This can (and will) be legitimately the case when sharing page tables
  with EPT (more of a problem before p2m_access_rwx became zero, but
  still possible even now when something other than that is the default for a
  guest), leading to an unconditional crash (in print_vtd_entries())
  when a DMA remapping fault occurs.
 
  could you elaborate on the scenarios in which bits 52+ are non-zero?
 
 
  Signed-off-by: Jan Beulich jbeul...@suse.com
 
  but the changes look reasonable to me.
 
  Signed-off-by: Kevin Tian kevin.t...@intel.com
 
 I translated this to a Reviewed-by, as S-o-b doesn't seem to make
 sense here.
 
 Konrad - please indicate whether this can also go into 4.5.
 
 Jan


Sorry for the typo. It should be Acked-by. :-)

Thanks
Kevin



Re: [Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

2015-01-07 Thread Konrad Rzeszutek Wilk
On Wed, Jan 07, 2015 at 10:15:39AM +, Jan Beulich wrote:
  On 23.12.14 at 07:52, kevin.t...@intel.com wrote:
   From: Jan Beulich [mailto:jbeul...@suse.com]
  Sent: Friday, December 19, 2014 7:26 PM
  
  This can (and will) be legitimately the case when sharing page tables
  with EPT (more of a problem before p2m_access_rwx became zero, but
  still possible even now when something other than that is the default for a
  guest), leading to an unconditional crash (in print_vtd_entries())
  when a DMA remapping fault occurs.
  
  could you elaborate on the scenarios in which bits 52+ are non-zero?
  
  
  Signed-off-by: Jan Beulich jbeul...@suse.com
  
  but the changes look reasonable to me.
  
  Signed-off-by: Kevin Tian kevin.t...@intel.com
 
 I translated this to a Reviewed-by, as S-o-b doesn't seem to make
 sense here.
 
 Konrad - please indicate whether this can also go into 4.5.

Yes. Release-Acked-by: Konrad Rzeszutek Wilk konrad.w...@oracle.com
 
 Jan
 



Re: [Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

2014-12-23 Thread Jan Beulich
 On 23.12.14 at 07:52, kevin.t...@intel.com wrote:
  From: Jan Beulich [mailto:jbeul...@suse.com]
 Sent: Friday, December 19, 2014 7:26 PM
 
 This can (and will) be legitimately the case when sharing page tables
 with EPT (more of a problem before p2m_access_rwx became zero, but
 still possible even now when something other than that is the default for a
 guest), leading to an unconditional crash (in print_vtd_entries())
 when a DMA remapping fault occurs.
 
 could you elaborate on the scenarios in which bits 52+ are non-zero?

This happens with shared page tables and a non-zero access type.

Jan
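
To make the failure mode concrete: with page tables shared between EPT and VT-d, the p2m access type ends up in the PTE bits above the address field, so the pre-patch dma_pte_addr() handed those bits back together with the frame address. Below is a minimal standalone sketch of the masking difference (plain C, with PADDR_MASK and PAGE_MASK_4K spelled out locally; the bit-52 placement of the access bits is assumed from the patch title rather than taken from the Xen headers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_4K  (~(uint64_t)0xfff)            /* clear bits 11:0 */
#define PADDR_MASK    ((((uint64_t)1) << 52) - 1)   /* keep bits 51:0  */

int main(void)
{
    /* Synthetic shared PTE: frame address, R/W bits, and a non-zero
     * access type parked in bits 52 and up. */
    uint64_t pte = ((uint64_t)0x123456 << 12) | 0x3 | ((uint64_t)0x5 << 52);

    uint64_t old_addr = pte & PAGE_MASK_4K;                /* old dma_pte_addr() */
    uint64_t new_addr = pte & PADDR_MASK & PAGE_MASK_4K;   /* patched macro      */

    printf("old: %#llx (bits 52+ leak through)\n", (unsigned long long)old_addr);
    printf("new: %#llx\n", (unsigned long long)new_addr);
    return 0;
}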




Re: [Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

2014-12-22 Thread Tian, Kevin
 From: Jan Beulich [mailto:jbeul...@suse.com]
 Sent: Friday, December 19, 2014 7:26 PM
 
 This can (and will) be legitimately the case when sharing page tables
 with EPT (more of a problem before p2m_access_rwx became zero, but
 still possible even now when something other than that is the default for a
 guest), leading to an unconditional crash (in print_vtd_entries())
 when a DMA remapping fault occurs.

could you elaborate on the scenarios in which bits 52+ are non-zero?

 
 Signed-off-by: Jan Beulich jbeul...@suse.com

but the changes look reasonable to me.

Signed-off-by: Kevin Tian kevin.t...@intel.com



[Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

2014-12-19 Thread Jan Beulich
This can (and will) be legitimately the case when sharing page tables
with EPT (more of a problem before p2m_access_rwx became zero, but
still possible even now when something other than that is the default for a
guest), leading to an unconditional crash (in print_vtd_entries())
when a DMA remapping fault occurs.

Signed-off-by: Jan Beulich jbeul...@suse.com
---
Regarding the release, if the changes to iommu.c are deemed too
extensive, then _I think_ they could be split off (that code ought to
come into play only when not sharing page tables between VT-d and EPT,
and VT-d code never sets the offending bits to non-zero values afaict).

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -258,8 +258,7 @@ static u64 addr_to_dma_page_maddr(struct
     struct dma_pte *parent, *pte = NULL;
     int level = agaw_to_level(hd->arch.agaw);
     int offset;
-    u64 pte_maddr = 0, maddr;
-    u64 *vaddr = NULL;
+    u64 pte_maddr = 0;
 
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->arch.mapping_lock));
@@ -281,19 +280,19 @@ static u64 addr_to_dma_page_maddr(struct
         offset = address_level_offset(addr, level);
         pte = &parent[offset];
 
-        if ( dma_pte_addr(*pte) == 0 )
+        pte_maddr = dma_pte_addr(*pte);
+        if ( !pte_maddr )
         {
             if ( !alloc )
                 break;
 
             pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
             drhd = acpi_find_matched_drhd_unit(pdev);
-            maddr = alloc_pgtable_maddr(drhd, 1);
-            if ( !maddr )
+            pte_maddr = alloc_pgtable_maddr(drhd, 1);
+            if ( !pte_maddr )
                 break;
 
-            dma_set_pte_addr(*pte, maddr);
-            vaddr = map_vtd_domain_page(maddr);
+            dma_set_pte_addr(*pte, pte_maddr);
 
             /*
              * high level table always sets r/w, last level
@@ -303,21 +302,12 @@ static u64 addr_to_dma_page_maddr(struct
             dma_set_pte_writable(*pte);
             iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
         }
-        else
-        {
-            vaddr = map_vtd_domain_page(pte->val);
-        }
 
         if ( level == 2 )
-        {
-            pte_maddr = pte->val & PAGE_MASK_4K;
-            unmap_vtd_domain_page(vaddr);
             break;
-        }
 
         unmap_vtd_domain_page(parent);
-        parent = (struct dma_pte *)vaddr;
-        vaddr = NULL;
+        parent = map_vtd_domain_page(pte_maddr);
         level--;
     }
 
@@ -2449,7 +2439,7 @@ static void vtd_dump_p2m_table_level(pad
             printk("%*sgfn: %08lx mfn: %08lx\n",
                    indent, "",
                    (unsigned long)(address >> PAGE_SHIFT_4K),
-                   (unsigned long)(pte->val >> PAGE_SHIFT_4K));
+                   (unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K));
         }
 
     unmap_vtd_domain_page(pt_vaddr);
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -276,7 +276,7 @@ struct dma_pte {
 #define dma_set_pte_snp(p)  do {(p).val |= DMA_PTE_SNP;} while(0)
 #define dma_set_pte_prot(p, prot) \
             do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_set_pte_addr(p, addr) do {\
             (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -170,16 +170,16 @@ void print_vtd_entries(struct iommu *iom
         l_index = get_level_index(gmfn, level);
         printk("l%d_index = %x\n", level, l_index);
 
-        pte.val = val = l[l_index];
+        pte.val = l[l_index];
         unmap_vtd_domain_page(l);
-        printk("l%d[%x] = %"PRIx64"\n", level, l_index, val);
+        printk("l%d[%x] = %"PRIx64"\n", level, l_index, pte.val);
 
-        pte.val = val;
         if ( !dma_pte_present(pte) )
         {
            printk("l%d[%x] not present\n", level, l_index);
            break;
        }
+        val = dma_pte_addr(pte);
     } while ( --level );
 }
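
For a rough picture of why the unmasked value could crash print_vtd_entries(): the walk feeds whatever it extracts from one level straight into the lookup of the next level, so bits left above the address field become a bogus table address. The toy two-level walk below (ordinary C, simplified index math and made-up values; only the masking mirrors the patch) contrasts the pre- and post-patch extraction:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K 12
#define PAGE_MASK_4K  (~(uint64_t)0xfff)
#define PADDR_MASK    ((((uint64_t)1) << 52) - 1)

/* The patched macro, written as a helper function for this sketch. */
static uint64_t dma_pte_addr(uint64_t val)
{
    return val & PADDR_MASK & PAGE_MASK_4K;
}

int main(void)
{
    /* Toy "next level" table; 4K-aligned so its address can stand in for a
     * page-table frame address here (a real walk goes through machine
     * addresses and map_vtd_domain_page()). */
    static uint64_t level1[512] __attribute__((aligned(4096)));
    level1[7] = ((uint64_t)0xabc << PAGE_SHIFT_4K) | 0x3;   /* leaf: addr + R/W */

    /* Upper-level entry pointing at level1, with bits 52 and up set the way
     * a shared entry carrying a non-zero access type would have them. */
    uint64_t l2e = (uint64_t)(uintptr_t)level1 | 0x3 | ((uint64_t)0x5 << 52);

    uint64_t next_old = l2e & PAGE_MASK_4K;   /* pre-patch: keeps bits 52+ */
    uint64_t next_new = dma_pte_addr(l2e);    /* post-patch: address only  */

    printf("pre-patch  next-level address: %#llx\n", (unsigned long long)next_old);
    printf("post-patch next-level address: %#llx\n", (unsigned long long)next_new);

    /* Only the properly masked address is safe to follow. */
    const uint64_t *l1 = (const uint64_t *)(uintptr_t)next_new;
    printf("level-1 entry [7]: %#llx\n", (unsigned long long)l1[7]);
    return 0;
}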
 

