On Mon, Jun 11, 2007 at 03:55:30PM +0900, Akio Takebe wrote:

> >On Fri, Jun 08, 2007 at 11:52:30PM +0900, Isaku Yamahata wrote:
> >> Thank you very much for information. It greatly helped.
> >> Could you try the attached patch with the previous patch?
> >> I hope it fixes the foreign domain page mapping case.
> >> However there remains the grant table mapping issue.
> >
> >When the backend maps the ring page of a frontend domain,
> >the debug message is printed out,
> >so seeing only a few messages is okay.
> >When you tested, did you see only a few messages whose backtraces
> >include the grant table, or many messages?
> I retried 15165 + test.patch (including your patch).
> I saw only the following message while booting the domU.
> But if I shut down the domU, the hypervisor doesn't panic.
> The following backtraces include __do_dom0vp_add_physmap();
> is that OK?

Unfortunately no. I reviewed the Linux-side code and found an off-by-one bug.
Could you try again with the three attached patches (plus the debug message
patch)? (Note that I already sent out two of the three attached patches; I
attached them again just for convenience.)
Thank you very much for your patience. You found three bugs!
I hope you won't hit any more.
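
For reference, here is a minimal standalone sketch of the arithmetic the
first patch below corrects (the PTRS_PER_PTE value and the pfn range are
made up for illustration; this is not the hypervisor code itself): with an
inclusive pfn range, the count needs a +1, and the number of pte pages
needs a ceiling division rather than a truncating one.

    /* Standalone sketch of the off-by-one fix; values are made up. */
    #include <stdio.h>

    #define PTRS_PER_PTE 2048UL /* assumed value for illustration */

    int main(void)
    {
        unsigned long min_pfn = 0, max_pfn = 4095; /* inclusive range */

        /* Buggy: drops the last pfn and truncates the page count. */
        unsigned long num_bug = max_pfn - min_pfn;
        unsigned long pgs_bug = num_bug / PTRS_PER_PTE;

        /* Fixed: inclusive pfn count and ceiling division. */
        unsigned long num_ok = max_pfn - min_pfn + 1;
        unsigned long pgs_ok = (num_ok + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

        printf("buggy: %lu pfns -> %lu pte pages\n", num_bug, pgs_bug);
        printf("fixed: %lu pfns -> %lu pte pages\n", num_ok, pgs_ok);
        return 0;
    }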

thanks,
-- 
yamahata
# HG changeset patch
# User [EMAIL PROTECTED]
# Date 1181551046 -32400
# Node ID 9296439c575517beb6d51239094d7aa7db126c6e
# Parent  7305e027b17dc3123bbbcf015d43b904f38c959a
fix off-by-one bug in p2m_expose_init
PATCHNAME: fix_off_by_one_p2m_expose_init

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r 7305e027b17d -r 9296439c5755 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Fri Jun 08 23:46:51 2007 +0900
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Mon Jun 11 17:37:26 2007 +0900
@@ -1029,9 +1029,9 @@ p2m_expose_init(void)
 			                                granule_pfn);
 			p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
 			                              granule_pfn);
-			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
+			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn + 1;
 			p2m_expose_size = num_pfn << PAGE_SHIFT;
-			p2m_size = num_pfn / PTRS_PER_PTE;
+			p2m_size = (num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
 			p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
 			if (p2m_size == page_size)
 				break;
@@ -1049,9 +1049,9 @@ p2m_expose_init(void)
 		p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
 		                                p2m_granule_pfn);
 		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
-		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
+		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn + 1;
 		p2m_expose_size = num_pfn << PAGE_SHIFT;
-		p2m_size = num_pfn / PTRS_PER_PTE;
+		p2m_size = (num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
 		p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
 		align = max(privcmd_resource_align,
 		            p2m_granule_pfn << PAGE_SHIFT);
@@ -1064,7 +1064,7 @@ p2m_expose_init(void)
 	if (error) {
 		printk(KERN_ERR P2M_PREFIX
 		       "can't allocate region for p2m exposure "
-		       "[0x%016lx, 0x%016lx) 0x%016lx\n",
+		       "[0x%016lx, 0x%016lx] 0x%016lx\n",
 		       p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
 		goto out;
 	}
@@ -1102,10 +1102,10 @@ p2m_expose_init(void)
 	p2m_initialized = 1;
 	printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
 	       p2m_convert_min_pfn << PAGE_SHIFT,
-	       p2m_convert_max_pfn << PAGE_SHIFT);
+	       (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
 	printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
 	       p2m_assign_start_pfn << PAGE_SHIFT,
-	       p2m_assign_end_pfn << PAGE_SHIFT,
+	       (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
 	       p2m_size / 1024);
 out:
 	unlock_cpu_hotplug();
# HG changeset patch
# User [EMAIL PROTECTED]
# Date 1181314011 -32400
# Node ID 7305e027b17dc3123bbbcf015d43b904f38c959a
# Parent  afbcf495550da3625d12ca5ae10120661b972a99
fix dom0vp_expose_p2m(). It exposed one extra zero p2m page.
PATCHNAME: fix_one_more_zero_p2m_page

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r afbcf495550d -r 7305e027b17d xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Fri Jun 08 12:35:19 2007 +0900
+++ b/xen/arch/ia64/xen/mm.c	Fri Jun 08 23:46:51 2007 +0900
@@ -1487,7 +1487,7 @@ dom0vp_expose_p2m(struct domain* d,
     }
 
     // expose p2m_pte_zero_page 
-    for (i = 0; i < expose_num_pfn / PTRS_PER_PTE + 1; i++) {
+    for (i = 0; i < (expose_num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE; i++) {
         assign_pte = lookup_noalloc_domain_pte(d, (assign_start_gpfn + i) <<
                                                PAGE_SHIFT);
         if (assign_pte == NULL || pte_present(*assign_pte))
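
(As a sanity check on the loop bound above, independent of Xen: the old
form "expose_num_pfn / PTRS_PER_PTE + 1" always adds a whole extra page
when expose_num_pfn is an exact multiple of PTRS_PER_PTE, while the
ceiling form does not. A throwaway test, with an assumed PTRS_PER_PTE:)

    /* Throwaway check of the old vs. new loop bounds; the PTRS_PER_PTE
     * value is assumed for illustration. */
    #include <assert.h>

    #define PTRS_PER_PTE 2048UL

    static unsigned long old_bound(unsigned long n)
    {
        return n / PTRS_PER_PTE + 1;
    }

    static unsigned long new_bound(unsigned long n)
    {
        return (n + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
    }

    int main(void)
    {
        /* Exact multiple: the old bound exposes one extra zero page. */
        assert(old_bound(2 * PTRS_PER_PTE) == 3);
        assert(new_bound(2 * PTRS_PER_PTE) == 2);
        /* Non-multiple: both bounds agree. */
        assert(old_bound(2 * PTRS_PER_PTE + 7) == 3);
        assert(new_bound(2 * PTRS_PER_PTE + 7) == 3);
        return 0;
    }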
# HG changeset patch
# User [EMAIL PROTECTED]
# Date 1181273719 -32400
# Node ID afbcf495550da3625d12ca5ae10120661b972a99
# Parent  96331db61e47065da5d374a9ec432d6837aaf5af
Fix p2m exposure. p2m table pages didn't belong to any domain, so they
required special handling.
Instead, dom_p2m is introduced and all p2m pages are made to belong to dom_p2m.
PATCHNAME: fix_p2m_exposure

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r 96331db61e47 -r afbcf495550d xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Wed Jun 06 09:30:01 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c	Fri Jun 08 12:35:19 2007 +0900
@@ -187,6 +187,12 @@ extern unsigned long ia64_iobase;
 
 static struct domain *dom_xen, *dom_io;
 
+/* This number is bigger than DOMID_SELF, DOMID_XEN and DOMID_IO.
+ * If more reserved domain ids are introduced, this may need to be increased.
+ */
+#define DOMID_P2M       (0x7FF8U)
+static struct domain *dom_p2m;
+
 // followings are stolen from arch_init_memory() @ xen/arch/x86/mm.c
 void
 alloc_dom_xen_and_dom_io(void)
@@ -227,11 +233,8 @@ mm_teardown_pte(struct domain* d, volati
     if (!mfn_valid(mfn))
         return;
     page = mfn_to_page(mfn);
-    // page might be pte page for p2m exposing. check it.
-    if (page_get_owner(page) == NULL) {
-        BUG_ON(page->count_info != 0);
-        return;
-    }
+    BUG_ON(page_get_owner(page) == NULL);
+
     // struct page_info corresponding to mfn may exist or not depending
     // on CONFIG_VIRTUAL_FRAME_TABLE.
     // The above check is too easy.
@@ -1379,10 +1382,19 @@ dom0vp_add_physmap_with_gmfn(struct doma
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
 static struct page_info* p2m_pte_zero_page = NULL;
 
+/* This must be called before dom0's p2m table allocation. */
 void __init
 expose_p2m_init(void)
 {
     pte_t* pte;
+
+    /*
+     * Initialise our DOMID_P2M domain.
+     * This domain owns the p2m table pages.
+     */
+    dom_p2m = alloc_domain(DOMID_P2M);
+    BUG_ON(dom_p2m == NULL);
+    dom_p2m->max_pages = ~0U;
 
     pte = pte_alloc_one_kernel(NULL, 0);
     BUG_ON(pte == NULL);
@@ -1393,13 +1405,8 @@ static int
 static int
 expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page)
 {
-    // we can't get_page(page) here.
-    // pte page is allocated form xen heap.(see pte_alloc_one_kernel().)
-    // so that the page has NULL page owner and it's reference count
-    // is useless.
-    // see also mm_teardown_pte()'s page_get_owner() == NULL check.
-    BUG_ON(page_get_owner(page) != NULL);
-
+    int ret = get_page(page, dom_p2m);
+    BUG_ON(ret != 1);
     return __assign_domain_page(d, mpaddr, page_to_maddr(page),
                                 ASSIGN_readonly);
 }
@@ -1918,8 +1925,9 @@ void *pgtable_quicklist_alloc(void)
 void *pgtable_quicklist_alloc(void)
 {
     void *p;
+    BUG_ON(dom_p2m == NULL);
     if (!opt_p2m_xenheap) {
-        struct page_info *page = alloc_domheap_page(NULL);
+        struct page_info *page = alloc_domheap_page(dom_p2m);
         if (page == NULL)
             return NULL;
         p = page_to_virt(page);
@@ -1927,16 +1935,28 @@ void *pgtable_quicklist_alloc(void)
         return p;
     }
     p = alloc_xenheap_pages(0);
-    if (p)
+    if (p) {
         clear_page(p);
+        share_xen_page_with_guest(virt_to_page(p), dom_p2m,
+                                  /*
+                                   * This page should be read-only.
+                                   * At this moment the third argument
+                                   * has no effect.
+                                   * It should be 1 once that is supported.
+                                   */
+                                  0);
+    }
     return p;
 }
 
 void pgtable_quicklist_free(void *pgtable_entry)
 {
-    if (!opt_p2m_xenheap)
-        free_domheap_page(virt_to_page(pgtable_entry));
-    else
+    struct page_info* page = virt_to_page(pgtable_entry);
+    BUG_ON(page_get_owner(page) != dom_p2m);
+    BUG_ON(page->count_info != (1 | PGC_allocated));
+
+    put_page(page);
+    if (opt_p2m_xenheap)
         free_xenheap_page(pgtable_entry);
 }
 
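
(The ownership change in the last patch follows the usual Xen pattern:
every p2m page now has dom_p2m as its owner, expose_p2m_page() takes a
reference against that owner with get_page(), and pgtable_quicklist_free()
checks the count before dropping the allocation reference. A rough
standalone sketch of that invariant, with simplified stand-in types, not
the real struct page_info:)

    /* Simplified sketch of the refcount/ownership invariant; stand-in
     * types, not the real Xen page_info code. */
    #include <assert.h>

    #define PGC_allocated (1UL << 31)

    struct domain { unsigned int id; };
    struct page   { unsigned long count_info; struct domain *owner; };

    /* Allocation sets the owner and one "allocated" reference. */
    static void alloc_for(struct page *pg, struct domain *d)
    {
        pg->owner = d;
        pg->count_info = 1 | PGC_allocated;
    }

    /* Taking a reference is only valid against the current owner. */
    static int get_page(struct page *pg, struct domain *d)
    {
        if (pg->owner != d)
            return 0;
        pg->count_info++;
        return 1;
    }

    static void put_page(struct page *pg)
    {
        pg->count_info--;
    }

    int main(void)
    {
        struct domain dom_p2m = { 0x7FF8 };
        struct page pg;

        alloc_for(&pg, &dom_p2m);
        assert(get_page(&pg, &dom_p2m) == 1);  /* expose_p2m_page() */
        put_page(&pg);                         /* un-exposing the page */
        /* pgtable_quicklist_free()'s precondition: */
        assert(pg.count_info == (1 | PGC_allocated));
        return 0;
    }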
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel
