There was no objection, so here is the patch.
I chose assign_xxx instead of alloc_xxx.
This is a purely mechanical rename, so I only did a compile test.
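
For context, the two clashing prototypes look roughly like this. The ia64
signature is taken verbatim from the patch below; the generic one is quoted
from memory of xen/include/xen/domain_page.h of this era, so treat it as an
approximation:

    /* xen/include/xen/domain_page.h (generic Xen interface): maps a
     * domain page into Xen's own address space and returns a pointer
     * to it. */
    void *map_domain_page(unsigned long pfn);

    /* xen/arch/ia64/xen/domain.c (ia64-local helper, same name,
     * different job): installs a physical address into a domain's
     * metaphysical address space. */
    void map_domain_page(struct domain *d, unsigned long mpaddr,
                         unsigned long physaddr);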

# HG changeset patch
# User [EMAIL PROTECTED]
# Node ID f7ff70e7e9d50428ef06bd272063d05162c8ffab
# Parent  5fcc346d6fe086436977a9b171f2bdb3a177d828
fix name conflict (map_domain_page)
One is defined in xen/include/xen/domain_page.h,
the other in xen/arch/ia64/xen/domain.c.
This patch renames the one defined in xen/arch/ia64/xen/domain.c.

For consistency, the related functions are also renamed:
map_new_domain_page()   -> assign_new_domain_page()
map_domain_page()       -> assign_domain_page() 
map_domain_io_page()    -> assign_domain_io_page()

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r 5fcc346d6fe0 -r f7ff70e7e9d5 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Thu Jan 26 11:31:28 2006 +0100
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Feb  1 11:13:30 2006 +0900
@@ -317,7 +317,7 @@
            for (j = io_ranges[i].start;
                 j < io_ranges[i].start + io_ranges[i].size;
                 j += PAGE_SIZE)
-               map_domain_page(d, j, io_ranges[i].type);
+               assign_domain_page(d, j, io_ranges[i].type);
        }
 
        conf_nr = VMX_CONFIG_PAGES(d);
@@ -334,14 +334,14 @@
        for (i = 0;
             i < (end < MMIO_START ? end : MMIO_START);
             i += PAGE_SIZE, pgnr++)
-           map_domain_page(d, i, pgnr << PAGE_SHIFT);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
 
        /* Map normal memory beyond 4G */
        if (unlikely(end > MMIO_START)) {
            start = 4 * MEM_G;
            end = start + (end - 3 * MEM_G);
            for (i = start; i < end; i += PAGE_SIZE, pgnr++)
-               map_domain_page(d, i, pgnr << PAGE_SHIFT);
+               assign_domain_page(d, i, pgnr << PAGE_SHIFT);
        }
 
        d->arch.max_pfn = end >> PAGE_SHIFT;
@@ -356,7 +356,7 @@
        /* Map guest firmware */
        pgnr = page_to_pfn(page);
        for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
-           map_domain_page(d, i, pgnr << PAGE_SHIFT);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
 
        if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
            printk("Could not allocate order=1 pages for vmx contig alloc\n");
@@ -365,9 +365,9 @@
 
        /* Map for shared I/O page and xenstore */
        pgnr = page_to_pfn(page);
-       map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
+       assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
        pgnr++;
-       map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
+       assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
 
        set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
        return 0;
diff -r 5fcc346d6fe0 -r f7ff70e7e9d5 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Jan 26 11:31:28 2006 +0100
+++ b/xen/arch/ia64/xen/domain.c        Wed Feb  1 11:13:30 2006 +0900
@@ -389,7 +389,7 @@
 }
 
 /* allocate new page for domain and map it to the specified metaphysical addr */
-struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
+struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
        struct mm_struct *mm = d->arch.mm;
        struct page *p = (struct page *)0;
@@ -400,7 +400,7 @@
 extern unsigned long vhpt_paddr, vhpt_pend;
 
        if (!mm->pgd) {
-               printk("map_new_domain_page: domain pgd must exist!\n");
+               printk("assign_new_domain_page: domain pgd must exist!\n");
                return(p);
        }
        pgd = pgd_offset(mm,mpaddr);
@@ -428,21 +428,21 @@
                        if (p) memset(__va(page_to_phys(p)),0,PAGE_SIZE);
                }
                if (unlikely(!p)) {
-printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
                        return(p);
                }
 if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
-  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
+  printf("assign_new_domain_page: reassigned vhpt page 
%p!!\n",page_to_phys(p));
 }
                set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
-       else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_new_domain_page: mpaddr %lx already 
mapped!\n",mpaddr);
        return p;
 }
 
 /* map a physical address to the specified metaphysical addr */
-void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
+void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
 {
        struct mm_struct *mm = d->arch.mm;
        pgd_t *pgd;
@@ -451,7 +451,7 @@
        pte_t *pte;
 
        if (!mm->pgd) {
-               printk("map_domain_page: domain pgd must exist!\n");
+               printk("assign_domain_page: domain pgd must exist!\n");
                return;
        }
        pgd = pgd_offset(mm,mpaddr);
@@ -472,11 +472,11 @@
                set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
-       else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
 }
 #if 0
 /* map a physical address with specified I/O flag */
-void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
+void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
 {
        struct mm_struct *mm = d->arch.mm;
        pgd_t *pgd;
@@ -486,7 +486,7 @@
        pte_t io_pte;
 
        if (!mm->pgd) {
-               printk("map_domain_page: domain pgd must exist!\n");
+               printk("assign_domain_page: domain pgd must exist!\n");
                return;
        }
        ASSERT(flags & GPFN_IO_MASK);
@@ -509,7 +509,7 @@
                pte_val(io_pte) = flags;
                set_pte(pte, io_pte);
        }
-       else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
 }
 #endif
 void mpafoo(unsigned long mpaddr)
@@ -557,7 +557,7 @@
        }
        /* if lookup fails and mpaddr is "legal", "create" the page */
        if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
-               if (map_new_domain_page(d,mpaddr)) goto tryagain;
+               if (assign_new_domain_page(d,mpaddr)) goto tryagain;
        }
        printk("lookup_domain_mpa: bad mpa %p (> %p\n",
                mpaddr,d->max_pages<<PAGE_SHIFT);
@@ -660,7 +660,7 @@
                if (pteval) dom_imva = __va(pteval & _PFN_MASK);
                else { printf("loaddomainelfimage: BAD!\n"); while(1); }
 #else
-               p = map_new_domain_page(d,dom_mpaddr);
+               p = assign_new_domain_page(d,dom_mpaddr);
                if (unlikely(!p)) BUG();
                dom_imva = __va(page_to_phys(p));
 #endif
diff -r 5fcc346d6fe0 -r f7ff70e7e9d5 xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h        Thu Jan 26 11:31:28 2006 +0100
+++ b/xen/include/asm-ia64/grant_table.h        Wed Feb  1 11:13:30 2006 +0900
@@ -17,7 +17,7 @@
 #define gnttab_shared_mfn(d, t, i)                                      \
     ( ((d) == dom0) ?                                                   \
       ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i)) :               \
-      (map_domain_page((d), 1UL<<40, virt_to_phys((t)->shared)),        \
+      (assign_domain_page((d), 1UL<<40, virt_to_phys((t)->shared)),        \
        1UL << (40 - PAGE_SHIFT))                                        \
     )
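
As an aside on the grant_table.h hunk: gnttab_shared_mfn() relies on the
C comma operator. For a domain other than dom0, it first calls
assign_domain_page() purely for its side effect (mapping the shared
grant-table frame at metaphysical address 1UL<<40), then yields that
address's frame number as the value of the whole expression. Roughly,
one expansion behaves like this (the variable names here are
illustrative, not from the source):

    /* for d != dom0, gnttab_shared_mfn(d, t, i) evaluates as: */
    unsigned long mfn =
        (assign_domain_page(d, 1UL << 40, virt_to_phys(t->shared)),
         /* ^ side effect only; the comma operator discards its value */
         1UL << (40 - PAGE_SHIFT));  /* value: frame number of 1UL<<40 */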
 



-- 
yamahata