[Xen-ia64-devel] Re: [IA64] Enhance vt-d support for ia64
On Mon, Feb 16, 2009 at 06:33:45PM +0800, Zhang, Xiantao wrote: Hi, Isaku This patch targets for enhancing vt-d support for ia64, could you help to review? It looks mostly good. How about moving arch independent MSI related definitions into a common header file instead of duplicating with xen/include/asm-ia64/msi.h? thanks, Xiantao # HG changeset patch # User xian...@vti-build.sh.intel.com # Date 1234244822 -28800 # Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43 # Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f [IA64] Enhance vt-d support for ia64 Signed-off-by: Xiantao Zhang xiantao.zh...@intel.com diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800 @@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void) unsigned long p2m_pages; unsigned long spare_hv_pages; unsigned long max_dom0_size; + unsigned long iommu_pg_table_pages = 0; /* Estimate maximum memory we can safely allocate for dom0 * by subtracting the p2m table allocation and a chunk of memory @@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void) domheap_pages = avail_domheap_pages(); p2m_pages = domheap_pages / PTRS_PER_PTE; spare_hv_pages = 8192 + (domheap_pages / 4096); - max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages)) - * PAGE_SIZE; + + if (iommu_enabled) + iommu_pg_table_pages = domheap_pages * 4 / 512; + /* There are 512 ptes in one 4K vtd page. 
*/ + + max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) - + iommu_pg_table_pages) * PAGE_SIZE; printk(Maximum permitted dom0 size: %luMB\n, max_dom0_size / (1024*1024)); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c --- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800 @@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain * { /* dummy */ } + +static int dom0_set_iommu_mapping(unsigned long start, unsigned long end, + void *arg) +{ +unsigned long tmp, pfn, j, page_addr = start; +struct domain *d = (struct domain *)arg; + +/* + * Set up 1:1 page table for dom0 except the critical segments + * like Xen and tboot. + */ + +while (page_addr end) +{ + if (xen_in_range(page_addr, page_addr + PAGE_SIZE)) +continue; + +pfn = page_addr PAGE_SHIFT; +tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); +for ( j = 0; j tmp; j++ ) +iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j)); + + page_addr += PAGE_SIZE; +} +} + +void iommu_dom0_do_mapping(struct domain *d) +{ + BUG_ON(d != dom0); + efi_memmap_walk(dom0_set_iommu_mapping, d); +} diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/iommu.c Tue Feb 10 13:47:02 2009 +0800 @@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom spin_unlock_irqrestore(iommu-register_lock, flags); } -#ifdef SUPPORT_MSI_REMAPPING static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) { u64 msi_address; @@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom dmar_writel(iommu-reg, DMAR_FEUADDR_REG, (u32)(msi_address 32)); spin_unlock_irqrestore(iommu-register_lock, flags); } -#else -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) -{ -/* ia64: TODO */ -} -#endif static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest) { @@ -993,24 +986,7 @@ static int 
intel_iommu_domain_init(struc if ( d-domain_id == 0 ) { -extern int xen_in_range(paddr_t start, paddr_t end); -extern int tboot_in_range(paddr_t start, paddr_t end); - -/* - * Set up 1:1 page table for dom0 except the critical segments - * like Xen and tboot. - */ -for ( i = 0; i max_page; i++ ) -{ -if ( xen_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) || - tboot_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) ) -continue; - -tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); -for ( j = 0; j tmp; j++ ) -iommu_map_page(d, (i*tmp+j), (i*tmp+j)); -} - + iommu_dom0_do_mapping(d); setup_dom0_devices(d); setup_dom0_rmrr(d); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/x86/vtd.c --- a/xen/drivers/passthrough/vtd/x86/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c
[Xen-ia64-devel] RE: [IA64] Enhance vt-d support for ia64
Isaku Yamahata wrote: On Mon, Feb 16, 2009 at 06:33:45PM +0800, Zhang, Xiantao wrote: Hi, Isaku This patch targets for enhancing vt-d support for ia64, could you help to review? It looks mostly good. How about moving arch independent MSI related definitions into a common header file instead of duplicating with xen/include/asm-ia64/msi.h? Good suggestion! But ia64 has different value for one macro with x86 side, so maybe not proper to move them into common header file. You know, in linux kernel, ia64 and X86 also duplicate them to solve the differences. :) Xiantao Xiantao # HG changeset patch # User xian...@vti-build.sh.intel.com # Date 1234244822 -28800 # Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43 # Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f [IA64] Enhance vt-d support for ia64 Signed-off-by: Xiantao Zhang xiantao.zh...@intel.com diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800 @@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void) unsigned long p2m_pages; unsigned long spare_hv_pages; unsigned long max_dom0_size; +unsigned long iommu_pg_table_pages = 0; /* Estimate maximum memory we can safely allocate for dom0 * by subtracting the p2m table allocation and a chunk of memory @@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void) domheap_pages = avail_domheap_pages(); p2m_pages = domheap_pages / PTRS_PER_PTE; spare_hv_pages = 8192 + (domheap_pages / 4096); -max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages)) - * PAGE_SIZE; + +if (iommu_enabled) +iommu_pg_table_pages = domheap_pages * 4 / 512; +/* There are 512 ptes in one 4K vtd page. 
*/ + +max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) - +iommu_pg_table_pages) * PAGE_SIZE; printk(Maximum permitted dom0 size: %luMB\n, max_dom0_size / (1024*1024)); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c --- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800 @@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain * { /* dummy */ } + +static int dom0_set_iommu_mapping(unsigned long start, unsigned long end, + void *arg) +{ +unsigned long tmp, pfn, j, page_addr = start; +struct domain *d = (struct domain *)arg; + +/* + * Set up 1:1 page table for dom0 except the critical segments + * like Xen and tboot. + */ + +while (page_addr end) +{ +if (xen_in_range(page_addr, page_addr + PAGE_SIZE)) + continue; + +pfn = page_addr PAGE_SHIFT; +tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); +for ( j = 0; j tmp; j++ ) +iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j)); + +page_addr += PAGE_SIZE; +} +} + +void iommu_dom0_do_mapping(struct domain *d) +{ +BUG_ON(d != dom0); +efi_memmap_walk(dom0_set_iommu_mapping, d); +} diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.cWed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/iommu.cTue Feb 10 13:47:02 2009 +0800 @@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom spin_unlock_irqrestore(iommu-register_lock, flags); } -#ifdef SUPPORT_MSI_REMAPPING static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) { u64 msi_address; @@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom dmar_writel(iommu-reg, DMAR_FEUADDR_REG, (u32)(msi_address 32)); spin_unlock_irqrestore(iommu-register_lock, flags); } -#else -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) -{ -/* ia64: TODO */ -} -#endif static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest) { @@ -993,24 +986,7 @@ static int 
intel_iommu_domain_init(struc if ( d-domain_id == 0 ) { -extern int xen_in_range(paddr_t start, paddr_t end); -extern int tboot_in_range(paddr_t start, paddr_t end); - -/* - * Set up 1:1 page table for dom0 except the critical segments - * like Xen and tboot. - */ -for ( i = 0; i max_page; i++ ) -{ -if ( xen_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) || - tboot_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) ) -continue; - -tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); -for ( j = 0; j tmp; j++ ) -iommu_map_page(d, (i*tmp+j), (i*tmp+j)); -} - +iommu_dom0_do_mapping(d); setup_dom0_devices(d);
[Xen-ia64-devel] Re: [IA64] Enhance vt-d support for ia64
On Tue, Feb 17, 2009 at 09:42:05AM +0800, Zhang, Xiantao wrote: Isaku Yamahata wrote: On Mon, Feb 16, 2009 at 06:33:45PM +0800, Zhang, Xiantao wrote: Hi, Isaku This patch targets for enhancing vt-d support for ia64, could you help to review? It looks mostly good. How about moving arch independent MSI related definitions into a common header file instead of duplicating with xen/include/asm-ia64/msi.h? Good suggestion! But ia64 has different value for one macro with x86 side, so maybe not proper to move them into common header file. You know, in linux kernel, ia64 and X86 also duplicate them to solve the differences. :) Oh I see. Sounds reasonable. Xiantao Xiantao # HG changeset patch # User xian...@vti-build.sh.intel.com # Date 1234244822 -28800 # Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43 # Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f [IA64] Enhance vt-d support for ia64 Signed-off-by: Xiantao Zhang xiantao.zh...@intel.com diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800 @@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void) unsigned long p2m_pages; unsigned long spare_hv_pages; unsigned long max_dom0_size; + unsigned long iommu_pg_table_pages = 0; /* Estimate maximum memory we can safely allocate for dom0 * by subtracting the p2m table allocation and a chunk of memory @@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void) domheap_pages = avail_domheap_pages(); p2m_pages = domheap_pages / PTRS_PER_PTE; spare_hv_pages = 8192 + (domheap_pages / 4096); - max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages)) - * PAGE_SIZE; + + if (iommu_enabled) + iommu_pg_table_pages = domheap_pages * 4 / 512; + /* There are 512 ptes in one 4K vtd page. 
*/ + + max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) - + iommu_pg_table_pages) * PAGE_SIZE; printk(Maximum permitted dom0 size: %luMB\n, max_dom0_size / (1024*1024)); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c --- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800 @@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain * { /* dummy */ } + +static int dom0_set_iommu_mapping(unsigned long start, unsigned long end, +void *arg) +{ +unsigned long tmp, pfn, j, page_addr = start; +struct domain *d = (struct domain *)arg; + +/* + * Set up 1:1 page table for dom0 except the critical segments + * like Xen and tboot. + */ + +while (page_addr end) +{ + if (xen_in_range(page_addr, page_addr + PAGE_SIZE)) + continue; + +pfn = page_addr PAGE_SHIFT; +tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); +for ( j = 0; j tmp; j++ ) +iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j)); + + page_addr += PAGE_SIZE; +} +} + +void iommu_dom0_do_mapping(struct domain *d) +{ + BUG_ON(d != dom0); + efi_memmap_walk(dom0_set_iommu_mapping, d); +} diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/iommu.c Tue Feb 10 13:47:02 2009 +0800 @@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom spin_unlock_irqrestore(iommu-register_lock, flags); } -#ifdef SUPPORT_MSI_REMAPPING static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) { u64 msi_address; @@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom dmar_writel(iommu-reg, DMAR_FEUADDR_REG, (u32)(msi_address 32)); spin_unlock_irqrestore(iommu-register_lock, flags); } -#else -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) -{ -/* ia64: TODO */ -} -#endif static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest) { @@ -993,24 +986,7 @@ static int 
intel_iommu_domain_init(struc if ( d-domain_id == 0 ) { -extern int xen_in_range(paddr_t start, paddr_t end); -extern int tboot_in_range(paddr_t start, paddr_t end); - -/* - * Set up 1:1 page table for dom0 except the critical segments - * like Xen and tboot. - */ -for ( i = 0; i max_page; i++ ) -{ -if ( xen_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) || - tboot_in_range(i PAGE_SHIFT, (i + 1) PAGE_SHIFT) ) -continue; - -tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); -for
[Xen-ia64-devel] Re: [IA64] Enhance vt-d support for ia64
The patch touches the common/x86 part. So please sent it to xen-devel, then I'll pull it. Acked-by: Isaku Yamahata yamah...@valinux.co.jp On Tue, Feb 17, 2009 at 10:57:21AM +0900, Isaku Yamahata wrote: On Tue, Feb 17, 2009 at 09:42:05AM +0800, Zhang, Xiantao wrote: Isaku Yamahata wrote: On Mon, Feb 16, 2009 at 06:33:45PM +0800, Zhang, Xiantao wrote: Hi, Isaku This patch targets for enhancing vt-d support for ia64, could you help to review? It looks mostly good. How about moving arch independent MSI related definitions into a common header file instead of duplicating with xen/include/asm-ia64/msi.h? Good suggestion! But ia64 has different value for one macro with x86 side, so maybe not proper to move them into common header file. You know, in linux kernel, ia64 and X86 also duplicate them to solve the differences. :) Oh I see. Sounds reasonable. Xiantao Xiantao # HG changeset patch # User xian...@vti-build.sh.intel.com # Date 1234244822 -28800 # Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43 # Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f [IA64] Enhance vt-d support for ia64 Signed-off-by: Xiantao Zhang xiantao.zh...@intel.com diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800 @@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void) unsigned long p2m_pages; unsigned long spare_hv_pages; unsigned long max_dom0_size; +unsigned long iommu_pg_table_pages = 0; /* Estimate maximum memory we can safely allocate for dom0 * by subtracting the p2m table allocation and a chunk of memory @@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void) domheap_pages = avail_domheap_pages(); p2m_pages = domheap_pages / PTRS_PER_PTE; spare_hv_pages = 8192 + (domheap_pages / 4096); -max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages)) - * PAGE_SIZE; + +if (iommu_enabled) +iommu_pg_table_pages = domheap_pages * 4 / 512; +/* 
There are 512 ptes in one 4K vtd page. */ + +max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) - +iommu_pg_table_pages) * PAGE_SIZE; printk(Maximum permitted dom0 size: %luMB\n, max_dom0_size / (1024*1024)); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c --- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800 @@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain * { /* dummy */ } + +static int dom0_set_iommu_mapping(unsigned long start, unsigned long end, + void *arg) +{ +unsigned long tmp, pfn, j, page_addr = start; +struct domain *d = (struct domain *)arg; + +/* + * Set up 1:1 page table for dom0 except the critical segments + * like Xen and tboot. + */ + +while (page_addr end) +{ +if (xen_in_range(page_addr, page_addr + PAGE_SIZE)) + continue; + +pfn = page_addr PAGE_SHIFT; +tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); +for ( j = 0; j tmp; j++ ) +iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j)); + +page_addr += PAGE_SIZE; +} +} + +void iommu_dom0_do_mapping(struct domain *d) +{ +BUG_ON(d != dom0); +efi_memmap_walk(dom0_set_iommu_mapping, d); +} diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.cWed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/iommu.cTue Feb 10 13:47:02 2009 +0800 @@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom spin_unlock_irqrestore(iommu-register_lock, flags); } -#ifdef SUPPORT_MSI_REMAPPING static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) { u64 msi_address; @@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom dmar_writel(iommu-reg, DMAR_FEUADDR_REG, (u32)(msi_address 32)); spin_unlock_irqrestore(iommu-register_lock, flags); } -#else -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) -{ -/* ia64: TODO */ -} -#endif static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest) { @@ 
-993,24 +986,7 @@ static int intel_iommu_domain_init(struc if ( d->domain_id == 0 ) { -extern int xen_in_range(paddr_t start, paddr_t end); -extern int tboot_in_range(paddr_t start, paddr_t end); - -
[Xen-ia64-devel] RE: [IA64] Enhance vt-d support for ia64
Okay, thanks! Xiantao -Original Message- From: Isaku Yamahata [mailto:yamah...@valinux.co.jp] Sent: Tuesday, February 17, 2009 10:12 AM To: Zhang, Xiantao Cc: xen-ia64-devel@lists.xensource.com Subject: Re: [IA64] Enhance vt-d support for ia64 The patch touches the common/x86 part. So please sent it to xen-devel, then I'll pull it. Acked-by: Isaku Yamahata yamah...@valinux.co.jp On Tue, Feb 17, 2009 at 10:57:21AM +0900, Isaku Yamahata wrote: On Tue, Feb 17, 2009 at 09:42:05AM +0800, Zhang, Xiantao wrote: Isaku Yamahata wrote: On Mon, Feb 16, 2009 at 06:33:45PM +0800, Zhang, Xiantao wrote: Hi, Isaku This patch targets for enhancing vt-d support for ia64, could you help to review? It looks mostly good. How about moving arch independent MSI related definitions into a common header file instead of duplicating with xen/include/asm-ia64/msi.h? Good suggestion! But ia64 has different value for one macro with x86 side, so maybe not proper to move them into common header file. You know, in linux kernel, ia64 and X86 also duplicate them to solve the differences. :) Oh I see. Sounds reasonable. 
Xiantao Xiantao # HG changeset patch # User xian...@vti-build.sh.intel.com # Date 1234244822 -28800 # Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43 # Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f [IA64] Enhance vt-d support for ia64 Signed-off-by: Xiantao Zhang xiantao.zh...@intel.com diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800 @@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void) unsigned long p2m_pages; unsigned long spare_hv_pages; unsigned long max_dom0_size; +unsigned long iommu_pg_table_pages = 0; /* Estimate maximum memory we can safely allocate for dom0 * by subtracting the p2m table allocation and a chunk of memory @@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void) domheap_pages = avail_domheap_pages(); p2m_pages = domheap_pages / PTRS_PER_PTE; spare_hv_pages = 8192 + (domheap_pages / 4096); -max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages)) - * PAGE_SIZE; + +if (iommu_enabled) +iommu_pg_table_pages = domheap_pages * 4 / 512; +/* There are 512 ptes in one 4K vtd page. */ + +max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) - +iommu_pg_table_pages) * PAGE_SIZE; printk(Maximum permitted dom0 size: %luMB\n, max_dom0_size / (1024*1024)); diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c --- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800 @@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain * { /* dummy */ } + +static int dom0_set_iommu_mapping(unsigned long start, unsigned long end, + void *arg) +{ +unsigned long tmp, pfn, j, page_addr = start; +struct domain *d = (struct domain *)arg; + +/* + * Set up 1:1 page table for dom0 except the critical segments + * like Xen and tboot. 
+ */ + +while (page_addr end) +{ +if (xen_in_range(page_addr, page_addr + PAGE_SIZE)) + continue; + +pfn = page_addr PAGE_SHIFT; +tmp = 1 (PAGE_SHIFT - PAGE_SHIFT_4K); +for ( j = 0; j tmp; j++ ) +iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j)); + +page_addr += PAGE_SIZE; +} +} + +void iommu_dom0_do_mapping(struct domain *d) +{ +BUG_ON(d != dom0); +efi_memmap_walk(dom0_set_iommu_mapping, d); +} diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.cWed Jan 28 12:22:58 2009 +0900 +++ b/xen/drivers/passthrough/vtd/iommu.cTue Feb 10 13:47:02 2009 +0800 @@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom spin_unlock_irqrestore(iommu-register_lock, flags); } -#ifdef SUPPORT_MSI_REMAPPING static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) { u64 msi_address; @@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom dmar_writel(iommu-reg, DMAR_FEUADDR_REG, (u32)(msi_address 32)); spin_unlock_irqrestore(iommu-register_lock, flags); } -#else -static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu) -{ -/* ia64: TODO */ -} -#endif static void dma_msi_set_affinity(unsigned int vector, cpumask_t