Re: [XenPPC] [PATCH 4 of 6] [PATCH] xen: implement guest_physmap_{add/remove}_page for ppc

2007-02-22 Thread Ryan Harper
* Hollis Blanchard <[EMAIL PROTECTED]> [2007-02-22 16:20]:
> On Wed, 2007-02-21 at 18:17 -0500, Ryan Harper wrote:
> > @@ -504,17 +508,15 @@ unsigned long mfn_to_gmfn(struct domain 
> >  mfn < (rma_mfn + (1 << d->arch.rma_order)))
> >  return mfn - rma_mfn;
> > 
> > -/* Extent? */
> > -cur_pfn = 1UL << d->arch.rma_order;
> > -list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
> > -uint pe_pages = 1UL << pe->order;
> > -uint b_mfn = page_to_mfn(pe->pg);
> > -uint e_mfn = b_mfn + pe_pages;
> > -
> > -if (mfn >= b_mfn && mfn < e_mfn) {
> > +/* check extents (cpu-defined contiguous chunks after RMA) */
> > +cur_pfn = 1UL << d->arch.rma_order; /* start looking after RMA */
> > +for ( ; cur_pfn < d->max_pages; cur_pfn += ext_nrpages )
> > +{
> > +uint b_mfn = d->arch.p2m[cur_pfn];
> > +uint e_mfn = b_mfn + ext_nrpages;
> > +
> > +if (mfn >= b_mfn && mfn < e_mfn)
> >  return cur_pfn + (mfn - b_mfn);
> > -}
> > -cur_pfn += pe_pages;
> >  }
> >  return INVALID_M2P_ENTRY;
> >  } 
> 
> I think you're splitting these patches up a lot more than necessary (to
> the point I'm having a hard time understanding them). Also, the above
> code is just removed by the next patch! If you combine 4 and 5 I think
> it will actually be smaller and easier to understand.

OK

> 
> I didn't realize these were just RFC. When you resubmit, could you put a
> little more description in each commit message?

Yeah, I should have put RFC in the subject.  I'll expand the
descriptions in the patches as well.
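
For reference, the reworked mfn_to_gmfn() scan in the hunk above boils
down to the standalone sketch below (helper name is illustrative; ulong,
d->arch.p2m, cpu_extent_order(), and INVALID_M2P_ENTRY are as in the tree):

    /* sketch: invert mfn -> gpfn by scanning the p2m array in fixed,
     * extent-sized strides; extents are physically contiguous, so the
     * first mfn of each chunk determines the whole range */
    static ulong extent_mfn_to_gmfn(struct domain *d, ulong mfn)
    {
        ulong ext_nrpages = 1UL << cpu_extent_order();
        ulong cur_pfn;

        /* extents begin where the RMA ends */
        for (cur_pfn = 1UL << d->arch.rma_order; cur_pfn < d->max_pages;
             cur_pfn += ext_nrpages) {
            ulong b_mfn = d->arch.p2m[cur_pfn];  /* first mfn of this chunk */

            if (mfn >= b_mfn && mfn < b_mfn + ext_nrpages)
                return cur_pfn + (mfn - b_mfn);
        }
        return INVALID_M2P_ENTRY;
    }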


-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, TX
(512) 838-9253   T/L: 678-9253
[EMAIL PROTECTED]



Re: [XenPPC] [PATCH 4 of 6] [PATCH] xen: implement guest_physmap_{add/remove}_page for ppc

2007-02-22 Thread Hollis Blanchard
On Wed, 2007-02-21 at 18:17 -0500, Ryan Harper wrote:
> @@ -504,17 +508,15 @@ unsigned long mfn_to_gmfn(struct domain 
>  mfn < (rma_mfn + (1 << d->arch.rma_order)))
>  return mfn - rma_mfn;
> 
> -/* Extent? */
> -cur_pfn = 1UL << d->arch.rma_order;
> -list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
> -uint pe_pages = 1UL << pe->order;
> -uint b_mfn = page_to_mfn(pe->pg);
> -uint e_mfn = b_mfn + pe_pages;
> -
> -if (mfn >= b_mfn && mfn < e_mfn) {
> +/* check extents (cpu-defined contiguous chunks after RMA) */
> +cur_pfn = 1UL << d->arch.rma_order; /* start looking after RMA */
> +for ( ; cur_pfn < d->max_pages; cur_pfn += ext_nrpages )
> +{
> +uint b_mfn = d->arch.p2m[cur_pfn];
> +uint e_mfn = b_mfn + ext_nrpages;
> +
> +if (mfn >= b_mfn && mfn < e_mfn)
>  return cur_pfn + (mfn - b_mfn);
> -}
> -cur_pfn += pe_pages;
>  }
>  return INVALID_M2P_ENTRY;
>  } 

I think you're splitting these patches up a lot more than necessary (to
the point I'm having a hard time understanding them). Also, the above
code is just removed by the next patch! If you combine 4 and 5 I think
it will actually be smaller and easier to understand.

I didn't realize these were just RFC. When you resubmit, could you put a
little more description in each commit message?

-- 
Hollis Blanchard
IBM Linux Technology Center




[XenPPC] [PATCH 4 of 6] [PATCH] xen: implement guest_physmap_{add/remove}_page for ppc

2007-02-21 Thread Ryan Harper
xen/arch/powerpc/domain_build.c     |    4 +
xen/arch/powerpc/mm.c               |  101 +++
xen/arch/powerpc/ofd_fixup_memory.c |    8 --
3 files changed, 73 insertions(+), 40 deletions(-)


# HG changeset patch
# User Ryan Harper <[EMAIL PROTECTED]>
# Date 1172103252 21600
# Node ID 33f05ec503bfabccd119f06b30037b618f8d05b9
# Parent  35fd77200dff7e73fe3959b5dbfa6088c691c502
[PATCH] xen: implement guest_physmap_{add/remove}_page for ppc

Signed-off-by: Ryan Harper <[EMAIL PROTECTED]>
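
In outline, guest_physmap_{add,remove}_page maintain a flat per-domain
p2m array indexed by guest pfn, which pfn2mfn() and mfn_to_gmfn() below
consult directly. A minimal sketch of what the two hooks amount to (the
bounds check and INVALID_MFN sentinel here are assumptions; the full
definitions fall outside this excerpt):

    void guest_physmap_add_page(struct domain *d, unsigned long gpfn,
                                unsigned long mfn)
    {
        /* record the gpfn -> mfn translation (sketch; no locking shown) */
        if (gpfn < d->max_pages)
            d->arch.p2m[gpfn] = mfn;
    }

    void guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
                                   unsigned long mfn)
    {
        /* drop the translation if it still points at this mfn (assumption) */
        if (gpfn < d->max_pages && d->arch.p2m[gpfn] == mfn)
            d->arch.p2m[gpfn] = INVALID_MFN;
    }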

diff -r 35fd77200dff -r 33f05ec503bf xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Wed Feb 21 18:14:12 2007 -0600
+++ b/xen/arch/powerpc/domain_build.c   Wed Feb 21 18:14:12 2007 -0600
@@ -178,10 +178,12 @@ int construct_dom0(struct domain *d,
 rma_sz = rma_size(d->arch.rma_order);
 rma = page_to_maddr(d->arch.rma_page);
 
-/* make sure we are at least as big as the RMA */
+/* if the requested dom0 RAM amount is more than the RMA, then allocate
+   the rest in cpu-defined, extent-sized chunks */
 if (dom0_nrpages > rma_nrpages)
 dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
 
+/* make sure we are at least as big as the RMA */
 ASSERT(d->tot_pages == dom0_nrpages);
 ASSERT(d->tot_pages >= rma_nrpages);
 
diff -r 35fd77200dff -r 33f05ec503bf xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Wed Feb 21 18:14:12 2007 -0600
+++ b/xen/arch/powerpc/mm.c Wed Feb 21 18:14:12 2007 -0600
@@ -319,11 +319,15 @@ void free_extents(struct domain *d)
 }
 }
 
+/* allocate (nrpages - rma_nrpages) more pages for the domain, in extent-sized chunks */
 uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
 {
 uint ext_order;
 uint ext_nrpages;
 uint total_nrpages;
+ulong mfn;
+ulong gpfn = rma_nrpages; /* starting pfn at end of RMA */
+int i;
 struct page_info *pg;
 
 ext_order = cpu_extent_order();
@@ -338,10 +342,13 @@ uint allocate_extents(struct domain *d, 
 if (pg == NULL)
 return total_nrpages;
 
-if (add_extent(d, pg, ext_order) < 0) {
-free_domheap_pages(pg, ext_order);
-return total_nrpages;
-}
+/* build p2m mapping for newly allocated extent */
+mfn = page_to_mfn(pg);
+for ( i = 0; i < (1 << ext_order); i++ )
+guest_physmap_add_page(d, gpfn + i, mfn + i);
+
+/* advance the starting gpfn by one extent's worth of pages */
+gpfn += ext_nrpages;
 total_nrpages += ext_nrpages;
 }
 
@@ -353,6 +360,7 @@ int allocate_rma(struct domain *d, unsig
 struct vcpu *v;
 ulong rma_base;
 ulong rma_sz;
+ulong mfn = INVALID_MFN;
 int i;
 
 if (d->arch.rma_page)
@@ -374,10 +382,15 @@ int allocate_rma(struct domain *d, unsig
 printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
d->domain_id, rma_base, rma_sz);
 
+mfn = page_to_mfn(d->arch.rma_page);
+
 for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
 /* Add in any extra CPUs that need flushing because of this page. */
 d->arch.rma_page[i].count_info |= PGC_page_RMA;
 clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
+
+/* setup p2m mapping for RMA */
+guest_physmap_add_page(d, i, mfn+i);
 }
 
 d->shared_info = (shared_info_t *)
@@ -403,7 +416,6 @@ ulong pfn2mfn(struct domain *d, ulong pf
 {
 ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
 ulong rma_size_mfn = 1UL << d->arch.rma_order;
-struct page_extents *pe;
 ulong mfn = INVALID_MFN;
 int t = PFN_TYPE_NONE;
 ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
@@ -424,18 +436,9 @@ ulong pfn2mfn(struct domain *d, ulong pf
 t = PFN_TYPE_RMA;
 mfn = pfn + rma_base_mfn;
 } else {
-ulong cur_pfn = rma_size_mfn;
-
-list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
-uint pe_pages = 1UL << pe->order;
-uint end_pfn = cur_pfn + pe_pages;
-
-if (pfn >= cur_pfn && pfn < end_pfn) {
-t = PFN_TYPE_LOGICAL;
-mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
-break;
-}
-cur_pfn += pe_pages;
+if ( pfn < d->max_pages ) {
+t = PFN_TYPE_LOGICAL;
+mfn = d->arch.p2m[pfn];
 }
 }
 #ifdef DEBUG
@@ -483,12 +486,13 @@ ulong pfn2mfn(struct domain *d, ulong pf
 return mfn;
 }
 
+/* inverse of pfn2mfn(): translate a machine frame back to a guest pfn */
 unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
 {
-struct page_extents *pe;
 ulong cur_pfn;
 ulong gnttab_mfn;
 ulong rma_mfn;
+uint ext_nrpages = (1 << cpu_extent_order());
 
 /* grant? */
 gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
@@ -504,17 +508,15 @@ unsigned long mfn_to_gmfn(struct domain 
 mfn < (rma_mfn + (1 << d->arch.rma_order)))
 return mfn - rma_mfn;
 
-