Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-28 Thread Olaf Hering
On Wed, Apr 26, Andrew Cooper wrote:

> On 26/04/17 16:43, Olaf Hering wrote:
> > On Thu, Apr 20, Jan Beulich wrote:
> >
> > On 20.04.17 at 18:04,  wrote:
> >>> On Thu, Apr 20, Andrew Cooper wrote:
> >>>
>  As it currently stands, the sending side iterates from 0 to p2m_size,
>  and sends every frame on the first pass.  This means we get PAGE_DATA
>  records linearly, in batches of 1024, or two aligned 2M superpages.
> >>> Is there a way to preserve 1G pages? This 380G domU I'm looking at is
> >>> built with 4k:461390 2M:2341 1G:365 pages.
> >> I think we've hashed out a possible way to deal with this, by
> >> speculatively allocating 1G pages as long as the allocation cap for
> >> the domain allows, subsequently punching holes into those pages
> >> if we can't allocate any new pages anymore (due to otherwise
> >> overrunning the cap).
> > The result is not pretty. This HVM-only approach appears to work for a
> > domU with "memory=3024" and localhost migration.
> > It is required to punch holes as soon as possible to avoid errors in
> > xenforeignmemory_map due to "Over-allocation". Would be nice if the
> > receiver gets a memory map upfront to avoid all stunts...
> 
> Oh - I was about to start working on this.  This is a pleasant surprise. :)

Here is a variant that actually works for migration between two dom0s.

--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -107,6 +107,9 @@ struct xc_sr_save_ops
  */
 struct xc_sr_restore_ops
 {
+/* Allocate a MFN for the given PFN */
+int (*allocate_pfn)(struct xc_sr_context *ctx, xen_pfn_t pfn);
+
 /* Convert a PFN to GFN.  May return ~0UL for an invalid mapping. */
 xen_pfn_t (*pfn_to_gfn)(const struct xc_sr_context *ctx, xen_pfn_t pfn);
 
@@ -172,6 +175,52 @@ struct xc_sr_x86_pv_restore_vcpu
 size_t basicsz, extdsz, xsavesz, msrsz;
 };
 
+struct xc_sr_bitmap
+{
+void *p;
+unsigned long bits;
+};
+
+extern bool _xc_sr_bitmap_resize(struct xc_sr_bitmap *bm, unsigned long bits);
+static inline bool xc_sr_bitmap_resize(struct xc_sr_bitmap *bm, unsigned long bits)
+{
+if (bits > bm->bits)
+return _xc_sr_bitmap_resize(bm, bits);
+return true;
+}
+
+static inline void xc_sr_bitmap_free(struct xc_sr_bitmap *bm)
+{
+free(bm->p);
+}
+
+static inline bool xc_sr_set_bit(unsigned long bit, struct xc_sr_bitmap *bm)
+{
+if (!xc_sr_bitmap_resize(bm, bit))
+return false;
+
+set_bit(bit, bm->p);
+return true;
+}
+
+static inline bool xc_sr_test_bit(unsigned long bit, struct xc_sr_bitmap *bm)
+{
+if (bit > bm->bits)
+return false;
+return !!test_bit(bit, bm->p);
+}
+
+static inline int xc_sr_test_and_clear_bit(unsigned long bit, struct xc_sr_bitmap *bm)
+{
+return test_and_clear_bit(bit, bm->p);
+}
+
+static inline int xc_sr_test_and_set_bit(unsigned long bit, struct xc_sr_bitmap *bm)
+{
+return test_and_set_bit(bit, bm->p);
+}
+
+
 struct xc_sr_context
 {
 xc_interface *xch;
@@ -256,8 +305,7 @@ struct xc_sr_context
 domid_t  xenstore_domid,  console_domid;
 
 /* Bitmap of currently populated PFNs during restore. */
-unsigned long *populated_pfns;
-xen_pfn_t max_populated_pfn;
+struct xc_sr_bitmap populated_pfns;
 
 /* Sender has invoked verify mode on the stream. */
 bool verify;
@@ -332,6 +380,12 @@ struct xc_sr_context
 /* HVM context blob. */
 void *context;
 size_t contextsz;
+
+/* Bitmap of currently allocated PFNs during restore. */
+struct xc_sr_bitmap attempted_1g;
+struct xc_sr_bitmap attempted_2m;
+struct xc_sr_bitmap allocated_pfns;
+unsigned long alloc_cnt;
 } restore;
 };
 } x86_hvm;
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -71,11 +71,9 @@ static int read_headers(struct xc_sr_con
 /*
  * Is a pfn populated?
  */
-static bool pfn_is_populated(const struct xc_sr_context *ctx, xen_pfn_t pfn)
+static bool pfn_is_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
-if ( pfn > ctx->restore.max_populated_pfn )
-return false;
-return test_bit(pfn, ctx->restore.populated_pfns);
+return xc_sr_test_bit(pfn, &ctx->restore.populated_pfns);
 }
 
 /*
@@ -87,42 +85,12 @@ static int pfn_set_populated(struct xc_s
 {
 xc_interface *xch = ctx->xch;
 
-if ( pfn > ctx->restore.max_populated_pfn )
+if ( !xc_sr_set_bit(pfn, &ctx->restore.populated_pfns) )
 {
-xen_pfn_t new_max;
-size_t old_sz, new_sz;
-unsigned long *p;
-
-/* Round up to the nearest power of two larger than pfn, less 1. */
-new_max = pfn;
-new_max |= new_max >> 1;
-new_max |= new_max >> 2;
-new_max |= new_max >> 4;
-new_max |= new_max >> 8;
-new_max |= new_max 
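
A minimal sketch of what the extern _xc_sr_bitmap_resize() declared above
could look like (illustration only, not the hunk that is cut off here; it
assumes bitmap_size() from xc_bitops.h and that callers only ever ask to
grow the map, as the inline wrapper does):

bool _xc_sr_bitmap_resize(struct xc_sr_bitmap *bm, unsigned long bits)
{
    unsigned long new_bits = bits;
    size_t old_sz, new_sz;
    char *p;

    /* Round the highest valid bit index up to (a power of two) - 1 so that
     * repeated small increases do not realloc every time. */
    while ( new_bits & (new_bits + 1) )
        new_bits |= new_bits >> 1;

    old_sz = bm->p ? bitmap_size(bm->bits + 1) : 0;
    new_sz = bitmap_size(new_bits + 1);

    p = realloc(bm->p, new_sz);
    if ( !p )
        return false;

    memset(p + old_sz, 0, new_sz - old_sz);   /* zero only the new tail */
    bm->p = p;
    bm->bits = new_bits;

    return true;
}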

Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-26 Thread Andrew Cooper
On 26/04/17 16:43, Olaf Hering wrote:
> On Thu, Apr 20, Jan Beulich wrote:
>
> On 20.04.17 at 18:04,  wrote:
>>> On Thu, Apr 20, Andrew Cooper wrote:
>>>
 As it currently stands, the sending side iterates from 0 to p2m_size,
 and sends every frame on the first pass.  This means we get PAGE_DATA
 records linearly, in batches of 1024, or two aligned 2M superpages.
>>> Is there a way to preserve 1G pages? This 380G domU I'm looking at is
>>> built with 4k:461390 2M:2341 1G:365 pages.
>> I think we've hashed out a possible way to deal with this, by
>> speculatively allocating 1G pages as long as the allocation cap for
>> the domain allows, subsequently punching holes into those pages
>> if we can't allocate any new pages anymore (due to otherwise
>> overrunning the cap).
> The result is not pretty. This HVM-only approach appears to work for a
> domU with "memory=3024" and localhost migration.
> It is required to punch holes as soon as possible to avoid errors in
> xenforeignmemory_map due to "Over-allocation". Would be nice if the
> receiver gets a memory map upfront to avoid all stunts...

Oh - I was about to start working on this.  This is a pleasant surprise. :)

One of the many outstanding problems with migration is that there is not
a memory map at all.  There really should be one, and it should be at
the head of the migration stream, along with other things currently
missing such as the CPUID policy.  (I'm working on this, but it isn't
going very fast.)

I'll review the patch as soon as I am free.

~Andrew



Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-26 Thread Olaf Hering
On Thu, Apr 20, Jan Beulich wrote:

> >>> On 20.04.17 at 18:04,  wrote:
> > On Thu, Apr 20, Andrew Cooper wrote:
> > 
> >> As it currently stands, the sending side iterates from 0 to p2m_size,
> >> and sends every frame on the first pass.  This means we get PAGE_DATA
> >> records linearly, in batches of 1024, or two aligned 2M superpages.
> > Is there a way to preserve 1G pages? This 380G domU I'm looking at is
> > built with 4k:461390 2M:2341 1G:365 pages.
> I think we've hashed out a possible way to deal with this, by
> speculatively allocating 1G pages as long as the allocation cap for
> the domain allows, subsequently punching holes into those pages
> if we can't allocate any new pages anymore (due to otherwise
> overrunning the cap).

The result is not pretty. This HVM-only approach appears to work for a
domU with "memory=3024" and localhost migration.
It is required to punch holes as soon as possible to avoid errors in
xenforeignmemory_map due to "Over-allocation". Would be nice if the
receiver gets a memory map upfront to avoid all stunts...

Olaf

diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index a83f22af4e..36e7891dde 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -107,6 +107,9 @@ struct xc_sr_save_ops
  */
 struct xc_sr_restore_ops
 {
+/* Allocate a MFN for the given PFN */
+int (*allocate_pfn)(struct xc_sr_context *ctx, xen_pfn_t pfn);
+
 /* Convert a PFN to GFN.  May return ~0UL for an invalid mapping. */
 xen_pfn_t (*pfn_to_gfn)(const struct xc_sr_context *ctx, xen_pfn_t pfn);
 
@@ -331,6 +334,14 @@ struct xc_sr_context
 /* HVM context blob. */
 void *context;
 size_t contextsz;
+
+/* Bitmap of currently allocated PFNs during restore. */
+xen_pfn_t *sp_extents;
+unsigned long *attempted_1g;
+unsigned long *attempted_2m;
+unsigned long *allocated_pfns;
+xen_pfn_t max_allocated_pfn;
+unsigned long alloc_cnt;
 } restore;
 };
 } x86_hvm;
diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index 3549f0a1ae..2e8d15307f 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -135,6 +135,7 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
   const xen_pfn_t *original_pfns, const uint32_t *types)
 {
 xc_interface *xch = ctx->xch;
+xen_pfn_t min_pfn = original_pfns[0], max_pfn = original_pfns[0];
 xen_pfn_t *mfns = malloc(count * sizeof(*mfns)),
 *pfns = malloc(count * sizeof(*pfns));
 unsigned i, nr_pfns = 0;
@@ -149,11 +150,18 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
 
 for ( i = 0; i < count; ++i )
 {
+if (original_pfns[i] < min_pfn)
+min_pfn = original_pfns[i];
+if (original_pfns[i] > max_pfn)
+max_pfn = original_pfns[i];
 if ( (!types || (types &&
  (types[i] != XEN_DOMCTL_PFINFO_XTAB &&
   types[i] != XEN_DOMCTL_PFINFO_BROKEN))) &&
  !pfn_is_populated(ctx, original_pfns[i]) )
 {
+rc = ctx->restore.ops.allocate_pfn(ctx, original_pfns[i]);
+if ( rc )
+goto err;
 rc = pfn_set_populated(ctx, original_pfns[i]);
 if ( rc )
 goto err;
@@ -161,6 +169,16 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
 ++nr_pfns;
 }
 }
+IPRINTF("checking range %lx %lx\n", min_pfn, max_pfn);
+while (min_pfn < max_pfn) {
+if (!pfn_is_populated(ctx, min_pfn) && test_and_clear_bit(min_pfn, ctx->x86_hvm.restore.allocated_pfns)) {
+xen_pfn_t pfn = min_pfn;
+rc = xc_domain_decrease_reservation_exact(xch, ctx->domid, 1, 0, &pfn);
+IPRINTF("free %lx %lx %d\n", min_pfn, pfn, rc);
+}
+min_pfn++;
+}
+nr_pfns = 0;
 
 if ( nr_pfns )
 {
@@ -723,6 +741,10 @@ static void cleanup(struct xc_sr_context *ctx)
 NRPAGES(bitmap_size(ctx->restore.p2m_size)));
 free(ctx->restore.buffered_records);
 free(ctx->restore.populated_pfns);
+free(ctx->x86_hvm.restore.sp_extents);
+free(ctx->x86_hvm.restore.attempted_1g);
+free(ctx->x86_hvm.restore.attempted_2m);
+free(ctx->x86_hvm.restore.allocated_pfns);
 if ( ctx->restore.ops.cleanup(ctx) )
 PERROR("Failed to clean up");
 }
@@ -810,6 +832,17 @@ static int restore(struct xc_sr_context *ctx)
 saved_errno = errno;
 saved_rc = rc;
 PERROR("Restore failed");
+{
+unsigned long i;
+bool a, p;
+IPRINTF("alloc_cnt %lu\n", ctx->x86_hvm.restore.alloc_cnt);
+for (i = 0; i < ctx->restore.p2m_size; i++) {
+p = test_bit(i, 

Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-20 Thread Jan Beulich
>>> On 20.04.17 at 18:04,  wrote:
> On Thu, Apr 20, Andrew Cooper wrote:
> 
>> As it currently stands, the sending side iterates from 0 to p2m_size,
>> and sends every frame on the first pass.  This means we get PAGE_DATA
>> records linearly, in batches of 1024, or two aligned 2M superpages.
> 
> Is there a way to preserve 1G pages? This 380G domU I'm looking at is
> built with 4k:461390 2M:2341 1G:365 pages.

I think we've hashed out a possible way to deal with this, by
speculatively allocating 1G pages as long as the allocation cap for
the domain allows, subsequently punching holes into those pages
if we can't allocate any new pages anymore (due to otherwise
overrunning the cap). For ballooned down guests this may add
some unnecessary overhead during migration, but I think the
post-migration benefit outweighs this. All that would be needed
is a second bitmap paralleling the one tracking populated pages,
to also track which pages we've actually seen data for (as those
obviously aren't candidates for the hole punching).
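
A minimal sketch of that hole-punching pass (illustration only, not a posted
patch): punch_holes() and the data_seen bitmap are hypothetical names, the
latter being the proposed second bitmap; test_bit()/clear_bit() come from
xc_bitops.h and xc_domain_decrease_reservation_exact() is the existing libxc
call.

/* Walk a range and hand back every frame that was populated speculatively
 * but never received a PAGE_DATA record; pages holding data are kept. */
static int punch_holes(struct xc_sr_context *ctx,
                       unsigned long *populated,   /* existing bitmap */
                       unsigned long *data_seen,   /* proposed second bitmap */
                       xen_pfn_t start, xen_pfn_t end)
{
    xen_pfn_t pfn, gfn;
    int rc;

    for ( pfn = start; pfn < end; pfn++ )
    {
        if ( !test_bit(pfn, populated) || test_bit(pfn, data_seen) )
            continue;                      /* hole already, or holds data */

        gfn = pfn;
        rc = xc_domain_decrease_reservation_exact(ctx->xch, ctx->domid,
                                                  1, 0, &gfn);
        if ( rc )
            return rc;

        clear_bit(pfn, populated);         /* no longer backed by a frame */
    }

    return 0;
}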

Jan




Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-20 Thread Olaf Hering
On Thu, Apr 20, Andrew Cooper wrote:

> As it currently stands, the sending side iterates from 0 to p2m_size,
> and sends every frame on the first pass.  This means we get PAGE_DATA
> records linearly, in batches of 1024, or two aligned 2M superpages.

Is there a way to preserve 1G pages? This 380G domU I'm looking at is
built with 4k:461390 2M:2341 1G:365 pages.

Was there any performance testing done with the new code? I think the
15-20% degradation we are seeing might be caused by this.


Olaf




Re: [Xen-devel] superpages lost after migration of HVM domU

2017-04-20 Thread Andrew Cooper
On 20/04/17 16:35, Olaf Hering wrote:
> Andrew,
>
> with eab806a097b ("tools/libxc: x86 PV restore code") the only call of
> xc_domain_populate_physmap_exact was added to the new restore code.
> This call always sets order=0. The old migration code did consider
> superpages, the new one does not.
>
> What is the reason for not using superpages when populating a HVM domU?
>
> I supposed the first iteration would allocate all of the required memory
> for a domU, perhaps as superpages. Following iterations would just refill
> existing pages.

That was actually a bugfix for an existing migration failure, and at the
time I didn't consider the performance impact.  (At the time of
migration v2, post-migrate runtime performance was at the very bottom of
the priority list).

The calculations of when to use larger order allocations were buggy, and
could end up trying to allocate more than nr_pages, which causes a hard
failure of the migration on the destination side.  This only became a
problem when certain gfns had been ballooned out.

As it currently stands, the sending side iterates from 0 to p2m_size,
and sends every frame on the first pass.  This means we get PAGE_DATA
records linearly, in batches of 1024, or two aligned 2M superpages.

Therefore, it should be easy to tweak xc_sr_restore.c:populate_pfns() to
find ranges of 512 consecutive gfns of XEN_DOMCTL_PFINFO_NOTAB and make
a single order-9 allocation, rather than 512 order-0 allocations.
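
A minimal sketch of that tweak (illustration only, meant to sit next to
pfn_is_populated() in xc_sr_restore.c): try_populate_2m() is a made-up
helper; only xc_domain_populate_physmap_exact() and XEN_DOMCTL_PFINFO_NOTAB
are existing interfaces.

#define SP_2M_ORDER 9
#define SP_2M_NR    (1UL << SP_2M_ORDER)

/* If pfns[i] starts a 2M-aligned run of 512 consecutive NOTAB gfns that are
 * all still unpopulated, back it with a single order-9 extent. */
static bool try_populate_2m(struct xc_sr_context *ctx, unsigned i,
                            unsigned count, const xen_pfn_t *pfns,
                            const uint32_t *types)
{
    xen_pfn_t base = pfns[i];
    unsigned j;

    if ( (base & (SP_2M_NR - 1)) || i + SP_2M_NR > count )
        return false;                      /* unaligned, or run leaves batch */

    for ( j = 0; j < SP_2M_NR; j++ )
        if ( pfns[i + j] != base + j ||
             (types && types[i + j] != XEN_DOMCTL_PFINFO_NOTAB) ||
             pfn_is_populated(ctx, pfns[i + j]) )
            return false;                  /* gap, odd type, or already there */

    if ( xc_domain_populate_physmap_exact(ctx->xch, ctx->domid, 1,
                                          SP_2M_ORDER, 0, &base) )
        return false;

    return true;   /* caller still marks all 512 gfns as populated */
}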

~Andrew



[Xen-devel] superpages lost after migration of HVM domU

2017-04-20 Thread Olaf Hering
Andrew,

with eab806a097b ("tools/libxc: x86 PV restore code") the only call of
xc_domain_populate_physmap_exact was added to the new restore code.
This call always sets order=0. The old migration code did consider
superpages, the new one does not.

What is the reason for not using superpages when populating a HVM domU?

I supposed the first iteration would allocate all of the required memory
for a domU, perhaps as superpages. Following iterations would just refill
existing pages.
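
For context, a sketch of the call in question (populate_one() is a
hypothetical wrapper; the only difference between a 4k, 2M and 1G allocation
is the extent order, and the gfn must be aligned to 1 << order):

static int populate_one(xc_interface *xch, uint32_t domid,
                        xen_pfn_t gfn, unsigned int order)
{
    /* nr_extents = 1, mem_flags = 0 */
    return xc_domain_populate_physmap_exact(xch, domid, 1, order, 0, &gfn);
}

/* populate_one(xch, domid, gfn, 0);    4k frame (what restore does today)
 * populate_one(xch, domid, gfn, 9);    2M superpage
 * populate_one(xch, domid, gfn, 18);   1G superpage                       */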

Olaf

