The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6540

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Lays the groundwork to link the new storage layer to the instance migration source.
From e0cfd645988c734a18d022ba3a83734b0c57d9ae Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 10:29:10 +0000
Subject: [PATCH 1/6] lxd/container/post: Returns instances resources from
 containerPost

Removes a duplicated check that the instance type is a container (this is
already checked in migrationSourceWs.Do).

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container_post.go | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/lxd/container_post.go b/lxd/container_post.go
index b8b6ba8592..9139571dd2 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -206,18 +206,14 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
                }
 
                instanceOnly := req.InstanceOnly || req.ContainerOnly
-
-               if inst.Type() != instancetype.Container {
-                       return response.SmartError(fmt.Errorf("Instance is not container type"))
-               }
-
                ws, err := NewMigrationSource(inst, stateful, instanceOnly)
                if err != nil {
                        return response.InternalError(err)
                }
 
                resources := map[string][]string{}
-               resources["containers"] = []string{name}
+               resources["instances"] = []string{name}
+               resources["containers"] = resources["instances"]
 
                if req.Target != nil {
                        // Push mode
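
As an aside, the aliasing above can be reduced to a minimal standalone Go sketch (a hypothetical main program, not LXD code): both map keys share one backing slice, so clients reading the legacy "containers" key and clients reading the new "instances" key observe the same data throughout the rename.

package main

import "fmt"

func main() {
        name := "c1"

        // Populate the new key first, then alias the legacy key to it.
        resources := map[string][]string{}
        resources["instances"] = []string{name}
        resources["containers"] = resources["instances"] // alias, not a copy

        // Both keys resolve to the same backing array.
        fmt.Println(resources["instances"], resources["containers"])
}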

From e3460a0aafb15f4c8822a8a30fa60150bb0cb90a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 10:33:37 +0000
Subject: [PATCH 2/6] lxd/migrate/container: Removes duplicated instance type
 checks from migrationSourceWs.Do

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_container.go | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 05e021453b..0f019db2ac 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -334,6 +334,11 @@ func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) {
 
 func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
        <-s.allConnected
+       if s.instance.Type() != instancetype.Container {
+               return fmt.Errorf("Instance is not container type")
+       }
+
+       ct := s.instance.(*containerLXC)
 
        criuType := migration.CRIUType_CRIU_RSYNC.Enum()
        if !s.live {
@@ -343,12 +348,6 @@ func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
                }
        }
 
-       if s.instance.Type() != instancetype.Container {
-               return fmt.Errorf("Instance is not container type")
-       }
-
-       c := s.instance.(*containerLXC)
-
        // Storage needs to start unconditionally now, since we need to
        // initialize a new storage interface.
        ourStart, err := s.instance.StorageStart()
@@ -360,8 +359,7 @@ func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
        }
 
        idmaps := make([]*migration.IDMapType, 0)
-
-       idmapset, err := c.DiskIdmap()
+       idmapset, err := ct.DiskIdmap()
        if err != nil {
                return err
        }
@@ -402,11 +400,6 @@ func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
 
        // The protocol says we have to send a header no matter what, so let's
        // do that, but then immediately send an error.
-       if s.instance.Type() != instancetype.Container {
-               return fmt.Errorf("Instance type must be container")
-       }
-
-       ct := s.instance.(*containerLXC)
 
        myType := ct.Storage().MigrationType()
        hasFeature := true
@@ -650,7 +643,7 @@ func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
 
                                // Do the final CRIU dump. This is needs no special
                                // handling if pre-dumps are used or not
-                               dumpSuccess <- c.Migrate(&criuMigrationArgs)
+                               dumpSuccess <- ct.Migrate(&criuMigrationArgs)
                                os.RemoveAll(checkpointDir)
                        }()
 
@@ -675,7 +668,7 @@ func (s *migrationSourceWs) Do(migrateOp 
*operations.Operation) error {
                                preDumpDir:   "",
                        }
 
-                       err = c.Migrate(&criuMigrationArgs)
+                       err = ct.Migrate(&criuMigrationArgs)
                        if err != nil {
                                return abort(err)
                        }
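
For illustration, here is a minimal standalone sketch of the "check once, assert once" shape this patch gives Do(). Instance and containerLXC below are hypothetical stand-ins for the LXD types, and a comma-ok assertion stands in for the instancetype check:

package main

import (
        "errors"
        "fmt"
)

// Hypothetical stand-ins for the LXD instance interface and its
// container implementation.
type Instance interface{ Name() string }

type containerLXC struct{ name string }

func (c *containerLXC) Name() string { return c.name }

// do mirrors the shape of migrationSourceWs.Do after the patch: one
// guarded assertion at the top, then the concrete type everywhere else.
func do(inst Instance) error {
        ct, ok := inst.(*containerLXC)
        if !ok {
                return errors.New("Instance is not container type")
        }

        // ct is now safe to use for every container-specific call below.
        fmt.Println("migrating", ct.Name())
        return nil
}

func main() {
        if err := do(&containerLXC{name: "c1"}); err != nil {
                fmt.Println(err)
        }
}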

From 4ef6ac03cd7a28989b228769efff686d5ace014b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 10:47:14 +0000
Subject: [PATCH 3/6] lxd: Removes dependency on instance.DaemonState() from
 migrationSourceWs

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container_post.go     | 8 ++++++--
 lxd/container_snapshot.go | 8 ++++++--
 lxd/migrate_container.go  | 3 +--
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/lxd/container_post.go b/lxd/container_post.go
index 9139571dd2..a1b82db3a1 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -215,6 +215,10 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
                resources["instances"] = []string{name}
                resources["containers"] = resources["instances"]
 
+               run := func(op *operations.Operation) error {
+                       return ws.Do(d.State(), op)
+               }
+
                if req.Target != nil {
                        // Push mode
                        err := ws.ConnectContainerTarget(*req.Target)
@@ -222,7 +226,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
                                return response.InternalError(err)
                        }
 
-                       op, err := operations.OperationCreate(d.State(), project, operations.OperationClassTask, db.OperationContainerMigrate, resources, nil, ws.Do, nil, nil)
+                       op, err := operations.OperationCreate(d.State(), project, operations.OperationClassTask, db.OperationContainerMigrate, resources, nil, run, nil, nil)
                        if err != nil {
                                return response.InternalError(err)
                        }
@@ -231,7 +235,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
                }
 
                // Pull mode
-               op, err := operations.OperationCreate(d.State(), project, operations.OperationClassWebsocket, db.OperationContainerMigrate, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+               op, err := operations.OperationCreate(d.State(), project, operations.OperationClassWebsocket, db.OperationContainerMigrate, resources, ws.Metadata(), run, nil, ws.Connect)
                if err != nil {
                        return response.InternalError(err)
                }
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 338750d2d9..44daff38aa 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -369,6 +369,10 @@ func snapshotPost(d *Daemon, r *http.Request, sc instance.Instance, containerNam
                resources := map[string][]string{}
                resources["containers"] = []string{containerName}
 
+               run := func(op *operations.Operation) error {
+                       return ws.Do(d.State(), op)
+               }
+
                if req.Target != nil {
                        // Push mode
                        err := ws.ConnectContainerTarget(*req.Target)
@@ -376,7 +380,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc instance.Instance, containerNam
                                return response.InternalError(err)
                        }
 
-                       op, err := operations.OperationCreate(d.State(), sc.Project(), operations.OperationClassTask, db.OperationSnapshotTransfer, resources, nil, ws.Do, nil, nil)
+                       op, err := operations.OperationCreate(d.State(), sc.Project(), operations.OperationClassTask, db.OperationSnapshotTransfer, resources, nil, run, nil, nil)
                        if err != nil {
                                return response.InternalError(err)
                        }
@@ -385,7 +389,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc instance.Instance, containerNam
                }
 
                // Pull mode
-               op, err := operations.OperationCreate(d.State(), sc.Project(), operations.OperationClassWebsocket, db.OperationSnapshotTransfer, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+               op, err := operations.OperationCreate(d.State(), sc.Project(), operations.OperationClassWebsocket, db.OperationSnapshotTransfer, resources, ws.Metadata(), run, nil, ws.Connect)
                if err != nil {
                        return response.InternalError(err)
                }
diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 0f019db2ac..6bff998ff4 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -332,7 +332,7 @@ func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) {
        return final, nil
 }
 
-func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
+func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operation) error {
        <-s.allConnected
        if s.instance.Type() != instancetype.Container {
                return fmt.Errorf("Instance is not container type")
@@ -546,7 +546,6 @@ func (s *migrationSourceWs) Do(migrateOp *operations.Operation) error {
                                return abort(err)
                        }
 
-                       state := s.instance.DaemonState()
                        actionScriptOp, err := operations.OperationCreate(
                                state,
                                s.instance.Project(),
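
The run closure above is a small adapter pattern that can be sketched in isolation; State, Operation and doMigrate below are hypothetical stand-ins, the point being that the closure satisfies the one-argument callback signature the operation framework expects while capturing the extra state parameter:

package main

import "fmt"

// Hypothetical stand-ins for *state.State and *operations.Operation.
type State struct{ execPath string }
type Operation struct{}

// doMigrate mirrors the new ws.Do(state, op) signature: daemon state
// arrives as an explicit argument instead of via instance.DaemonState().
func doMigrate(s *State, op *Operation) error {
        fmt.Println("rsync exec path:", s.execPath)
        return nil
}

func main() {
        state := &State{execPath: "/usr/bin/lxd"}

        // Adapter closure with the one-argument signature that the
        // callback registry expects; state is captured, not fetched.
        run := func(op *Operation) error {
                return doMigrate(state, op)
        }

        _ = run(&Operation{})
}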

From df217102772f1aeb41db5d4a6a6d369d19ad999c Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 10:49:27 +0000
Subject: [PATCH 4/6] lxd/storage: Removes DaemonState() from pool interface

This same function is deprecated in containerLXC with a comment:

"This function should go away, since the abstract container interface should 
not be coupled with internal state details."

That seems a valid sentiment for the storage interface too.

At the very least it shouldn't be an exported function, but as it happens it's
not used anyway, so it is removed entirely.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/backend_lxd.go    | 4 ----
 lxd/storage/backend_mock.go   | 4 ----
 lxd/storage/pool_interface.go | 4 ----
 3 files changed, 12 deletions(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 511f0d9a96..0a548b9142 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -34,10 +34,6 @@ type lxdBackend struct {
        logger logger.Logger
 }
 
-func (b *lxdBackend) DaemonState() *state.State {
-       return b.state
-}
-
 // ID returns the storage pool ID.
 func (b *lxdBackend) ID() int64 {
        return b.id
diff --git a/lxd/storage/backend_mock.go b/lxd/storage/backend_mock.go
index 089ff98676..b40cf40f20 100644
--- a/lxd/storage/backend_mock.go
+++ b/lxd/storage/backend_mock.go
@@ -19,10 +19,6 @@ type mockBackend struct {
        logger logger.Logger
 }
 
-func (b *mockBackend) DaemonState() *state.State {
-       return b.state
-}
-
 func (b *mockBackend) ID() int64 {
        return -1
 }
diff --git a/lxd/storage/pool_interface.go b/lxd/storage/pool_interface.go
index bd7462345f..802e4858bc 100644
--- a/lxd/storage/pool_interface.go
+++ b/lxd/storage/pool_interface.go
@@ -7,16 +7,12 @@ import (
        "github.com/lxc/lxd/lxd/instance"
        "github.com/lxc/lxd/lxd/migration"
        "github.com/lxc/lxd/lxd/operations"
-       "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/lxd/storage/drivers"
        "github.com/lxc/lxd/shared/api"
 )
 
 // Pool represents a LXD storage pool.
 type Pool interface {
-       // Internal.
-       DaemonState() *state.State
-
        // Pool.
        ID() int64
        Name() string
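
A minimal sketch of the narrowing, again with hypothetical types rather than the LXD ones: once DaemonState() is gone from Pool, callers that need daemon state must accept it explicitly instead of reaching through the pool for it.

package main

import "fmt"

// Hypothetical stand-in for *state.State.
type State struct{ Online bool }

// Pool no longer exposes daemon state; it only describes the pool.
type Pool interface {
        ID() int64
        Name() string
}

type dirPool struct{}

func (p *dirPool) ID() int64    { return 1 }
func (p *dirPool) Name() string { return "default" }

// Callers that need daemon state take it as a parameter.
func mount(s *State, p Pool) error {
        if !s.Online {
                return fmt.Errorf("daemon not ready to mount pool %q", p.Name())
        }
        fmt.Println("mounting pool", p.Name())
        return nil
}

func main() {
        _ = mount(&State{Online: true}, &dirPool{})
}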

From 039d0c2c8df6425f9975a60844b5e205f3d4e27c Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 12:25:28 +0000
Subject: [PATCH 5/6] lxd/migrate/storage/volumes: Removes unrelated comment

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_storage_volumes.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index f4a073fd92..5b2918bef0 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -122,8 +122,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
        offerHeader.SnapshotNames = snapshotNames
        offerHeader.Snapshots = snapshots
 
-       // The protocol says we have to send a header no matter what, so let's
-       // do that, but then immediately send an error.
+       // Send offer to target.
        err = s.send(&offerHeader)
        if err != nil {
                logger.Errorf("Failed to send storage volume migration header")

From 9a4de2d214c16012e749d0de276f180ab0faf99f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 3 Dec 2019 12:33:29 +0000
Subject: [PATCH 6/6] lxd/migrate/container: Restructures
 migrationSourceWs.Do() ready for new storage layer.

- Adds placeholder variable for new storage pool so that logic can be grouped 
into old/new sections by detecting if it is nil.
- Cleans up comment format.
- Replaces shared header variable with offerHeader and respHeader to clearly 
differentiate between each side during negotiation.
- Makes golint happy.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_container.go | 285 ++++++++++++++++++++-------------------
 1 file changed, 148 insertions(+), 137 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 6bff998ff4..9c8f5787bf 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -340,6 +340,42 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 
        ct := s.instance.(*containerLXC)
 
+       var offerHeader migration.MigrationHeader
+
+       var pool storagePools.Pool // Placeholder for new storage pool.
+       if s.instance.Type() == instancetype.Container {
+               // Storage needs to start unconditionally now, since we need to initialize a new
+               // storage interface.
+               ourStart, err := s.instance.StorageStart()
+               if err != nil {
+                       return err
+               }
+               if ourStart {
+                       defer s.instance.StorageStop()
+               }
+
+               myType := ct.Storage().MigrationType()
+               hasFeature := true
+               offerHeader = migration.MigrationHeader{
+                       Fs: &myType,
+                       RsyncFeatures: &migration.RsyncFeatures{
+                               Xattrs:        &hasFeature,
+                               Delete:        &hasFeature,
+                               Compress:      &hasFeature,
+                               Bidirectional: &hasFeature,
+                       },
+               }
+
+               if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
+                       offerHeader.ZfsFeatures = &migration.ZfsFeatures{
+                               Compress: &hasFeature,
+                       }
+               }
+       } else {
+               return fmt.Errorf("Instance type not supported")
+       }
+
+       // Add CRIU info to source header.
        criuType := migration.CRIUType_CRIU_RSYNC.Enum()
        if !s.live {
                criuType = nil
@@ -347,24 +383,14 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                        criuType = migration.CRIUType_NONE.Enum()
                }
        }
+       offerHeader.Criu = criuType
 
-       // Storage needs to start unconditionally now, since we need to
-       // initialize a new storage interface.
-       ourStart, err := s.instance.StorageStart()
-       if err != nil {
-               return err
-       }
-       if ourStart {
-               defer s.instance.StorageStop()
-       }
-
+       // Add idmap info to source header.
        idmaps := make([]*migration.IDMapType, 0)
        idmapset, err := ct.DiskIdmap()
        if err != nil {
                return err
-       }
-
-       if idmapset != nil {
+       } else if idmapset != nil {
                for _, ctnIdmap := range idmapset.Idmap {
                        idmap := migration.IDMapType{
                                Isuid:    proto.Bool(ctnIdmap.Isuid),
@@ -378,9 +404,11 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                }
        }
 
+       offerHeader.Idmap = idmaps
+
+       // Add snapshot info to source header if needed.
        snapshots := []*migration.Snapshot{}
        snapshotNames := []string{}
-       // Only send snapshots when requested.
        if !s.instanceOnly {
                fullSnaps, err := s.instance.Snapshots()
                if err == nil {
@@ -392,128 +420,117 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                }
        }
 
-       use_pre_dumps := false
-       max_iterations := 0
+       offerHeader.SnapshotNames = snapshotNames
+       offerHeader.Snapshots = snapshots
+
+       // Add predump info to source header.
+       usePreDumps := false
+       maxDumpIterations := 0
        if s.live {
-               use_pre_dumps, max_iterations = s.checkForPreDumpSupport()
-       }
-
-       // The protocol says we have to send a header no matter what, so let's
-       // do that, but then immediately send an error.
-
-       myType := ct.Storage().MigrationType()
-       hasFeature := true
-       header := migration.MigrationHeader{
-               Fs:            &myType,
-               Criu:          criuType,
-               Idmap:         idmaps,
-               SnapshotNames: snapshotNames,
-               Snapshots:     snapshots,
-               Predump:       proto.Bool(use_pre_dumps),
-               RsyncFeatures: &migration.RsyncFeatures{
-                       Xattrs:        &hasFeature,
-                       Delete:        &hasFeature,
-                       Compress:      &hasFeature,
-                       Bidirectional: &hasFeature,
-               },
-       }
-
-       if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
-               header.ZfsFeatures = &migration.ZfsFeatures{
-                       Compress: &hasFeature,
-               }
+               usePreDumps, maxDumpIterations = s.checkForPreDumpSupport()
        }
 
-       err = s.send(&header)
+       offerHeader.Predump = proto.Bool(usePreDumps)
+
+       // Send offer to target.
+       err = s.send(&offerHeader)
        if err != nil {
                s.sendControl(err)
                return err
        }
 
-       err = s.recv(&header)
+       // Receive response from target.
+       var respHeader migration.MigrationHeader
+       err = s.recv(&respHeader)
        if err != nil {
                s.sendControl(err)
                return err
        }
 
-       // Handle rsync options
-       rsyncFeatures := header.GetRsyncFeaturesSlice()
+       var legacyDriver MigrationStorageSourceDriver
+       var abort func(err error) error
+       var bwlimit string
+
+       // Handle rsync options.
+       rsyncFeatures := respHeader.GetRsyncFeaturesSlice()
        if !shared.StringInSlice("bidirectional", rsyncFeatures) {
-               // If no bi-directional support, assume LXD 3.7 level
-               // NOTE: Do NOT extend this list of arguments
+               // If no bi-directional support, assume LXD 3.7 level.
+               // NOTE: Do NOT extend this list of arguments.
                rsyncFeatures = []string{"xattrs", "delete", "compress"}
        }
 
-       // Handle zfs options
-       zfsFeatures := header.GetZfsFeaturesSlice()
+       if pool == nil {
+               // Handle zfs options.
+               zfsFeatures := respHeader.GetZfsFeaturesSlice()
 
-       // Set source args
-       sourceArgs := MigrationSourceArgs{
-               Instance:      s.instance,
-               InstanceOnly:  s.instanceOnly,
-               RsyncFeatures: rsyncFeatures,
-               ZfsFeatures:   zfsFeatures,
-       }
+               // Set source args.
+               sourceArgs := MigrationSourceArgs{
+                       Instance:      s.instance,
+                       InstanceOnly:  s.instanceOnly,
+                       RsyncFeatures: rsyncFeatures,
+                       ZfsFeatures:   zfsFeatures,
+               }
 
-       // Initialize storage driver
-       driver, fsErr := ct.Storage().MigrationSource(sourceArgs)
-       if fsErr != nil {
-               s.sendControl(fsErr)
-               return fsErr
-       }
+               // Initialize storage driver.
+               var fsErr error
+               legacyDriver, fsErr = ct.Storage().MigrationSource(sourceArgs)
+               if fsErr != nil {
+                       s.sendControl(fsErr)
+                       return fsErr
+               }
 
-       bwlimit := ""
-       if header.GetRefresh() || *header.Fs != myType {
-               myType = migration.MigrationFSType_RSYNC
-               header.Fs = &myType
+               if respHeader.GetRefresh() || *offerHeader.Fs != *respHeader.Fs {
+                       myType := migration.MigrationFSType_RSYNC
+                       respHeader.Fs = &myType
 
-               if header.GetRefresh() {
-                       driver, _ = rsyncRefreshSource(header.GetSnapshotNames(), sourceArgs)
-               } else {
-                       driver, _ = rsyncMigrationSource(sourceArgs)
+                       if respHeader.GetRefresh() {
+                               legacyDriver, _ = rsyncRefreshSource(respHeader.GetSnapshotNames(), sourceArgs)
+                       } else {
+                               legacyDriver, _ = rsyncMigrationSource(sourceArgs)
+                       }
+
+                       // Check if this storage pool has a rate limit set for rsync.
+                       poolwritable := ct.Storage().GetStoragePoolWritable()
+                       if poolwritable.Config != nil {
+                               bwlimit = poolwritable.Config["rsync.bwlimit"]
+                       }
                }
 
-               // Check if this storage pool has a rate limit set for rsync.
-               poolwritable := ct.Storage().GetStoragePoolWritable()
-               if poolwritable.Config != nil {
-                       bwlimit = poolwritable.Config["rsync.bwlimit"]
+               // All failure paths need to do a few things to correctly handle errors before
+               // returning. Unfortunately, handling errors is not well-suited to defer as the code
+               // depends on the status of driver and the error value. The error value is
+               // especially tricky due to the common case of creating a new err variable
+               // (intentional or not) due to scoping and use of ":=".  Capturing err in a closure
+               // for use in defer would be fragile, which defeats the purpose of using defer.
+               // An abort function reduces the odds of mishandling errors without introducing the
+               // fragility of closing on err.
+               abort = func(err error) error {
+                       legacyDriver.Cleanup()
+                       go s.sendControl(err)
+                       return err
+               }
+
+               err = legacyDriver.SendWhileRunning(s.fsConn, migrateOp, bwlimit, s.instanceOnly)
+               if err != nil {
+                       return abort(err)
                }
        }
 
-       // Check if the other side knows about pre-dumping and
-       // the associated rsync protocol
-       use_pre_dumps = header.GetPredump()
-       if use_pre_dumps {
+       // Check if the other side knows about pre-dumping and the associated rsync protocol.
+       usePreDumps = respHeader.GetPredump()
+       if usePreDumps {
                logger.Debugf("The other side does support pre-copy")
        } else {
                logger.Debugf("The other side does not support pre-copy")
        }
 
-       // All failure paths need to do a few things to correctly handle errors before returning.
-       // Unfortunately, handling errors is not well-suited to defer as the code depends on the
-       // status of driver and the error value.  The error value is especially tricky due to the
-       // common case of creating a new err variable (intentional or not) due to scoping and use
-       // of ":=".  Capturing err in a closure for use in defer would be fragile, which defeats
-       // the purpose of using defer.  An abort function reduces the odds of mishandling errors
-       // without introducing the fragility of closing on err.
-       abort := func(err error) error {
-               driver.Cleanup()
-               go s.sendControl(err)
-               return err
-       }
-
-       err = driver.SendWhileRunning(s.fsConn, migrateOp, bwlimit, s.instanceOnly)
-       if err != nil {
-               return abort(err)
-       }
-
        restoreSuccess := make(chan bool, 1)
        dumpSuccess := make(chan error, 1)
 
        if s.live {
-               if header.Criu == nil {
+               if respHeader.Criu == nil {
                        return abort(fmt.Errorf("Got no CRIU socket type for live migration"))
-               } else if *header.Criu != migration.CRIUType_CRIU_RSYNC {
+               } else if *respHeader.Criu != migration.CRIUType_CRIU_RSYNC {
                        return abort(fmt.Errorf("Formats other than criu rsync not understood"))
                }
 
@@ -523,22 +540,16 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                }
 
                if util.RuntimeLiblxcVersionAtLeast(2, 0, 4) {
-                       /* What happens below is slightly convoluted. Due to various
-                        * complications with networking, there's no easy way for criu
-                        * to exit and leave the container in a frozen state for us to
-                        * somehow resume later.
-                        *
-                        * Instead, we use what criu calls an "action-script", which is
-                        * basically a callback that lets us know when the dump is
-                        * done. (Unfortunately, we can't pass arguments, just an
-                        * executable path, so we write a custom action script with the
-                        * real command we want to run.)
-                        *
-                        * This script then hangs until the migration operation either
-                        * finishes successfully or fails, and exits 1 or 0, which
-                        * causes criu to either leave the container running or kill it
-                        * as we asked.
-                        */
+                       // What happens below is slightly convoluted. Due to various complications
+                       // with networking, there's no easy way for criu to exit and leave the
+                       // container in a frozen state for us to somehow resume later.
+                       // Instead, we use what criu calls an "action-script", which is basically a
+                       // callback that lets us know when the dump is done. (Unfortunately, we
+                       // can't pass arguments, just an executable path, so we write a custom
+                       // action script with the real command we want to run.)
+                       // This script then hangs until the migration operation either finishes
+                       // successfully or fails, and exits 1 or 0, which causes criu to either
+                       // leave the container running or kill it as we asked.
                        dumpDone := make(chan bool, 1)
                        actionScriptOpSecret, err := shared.RandomCryptoString()
                        if err != nil {
@@ -595,17 +606,17 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 
                        preDumpCounter := 0
                        preDumpDir := ""
-                       if use_pre_dumps {
+                       if usePreDumps {
                                final := false
                                for !final {
                                        preDumpCounter++
-                                       if preDumpCounter < max_iterations {
+                                       if preDumpCounter < maxDumpIterations {
                                                final = false
                                        } else {
                                                final = true
                                        }
                                        dumpDir := fmt.Sprintf("%03d", preDumpCounter)
-                                       loop_args := preDumpLoopArgs{
+                                       loopArgs := preDumpLoopArgs{
                                                checkpointDir: checkpointDir,
                                                bwlimit:       bwlimit,
                                                preDumpDir:    preDumpDir,
@@ -613,7 +624,7 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                                                final:         final,
                                                rsyncFeatures: rsyncFeatures,
                                        }
-                                       final, err = s.preDumpLoop(&loop_args)
+                                       final, err = s.preDumpLoop(&loopArgs)
                                        if err != nil {
                                                os.RemoveAll(checkpointDir)
                                                return abort(err)
@@ -640,17 +651,17 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                                        function:     "migration",
                                }
 
-                               // Do the final CRIU dump. This is needs no special
-                               // handling if pre-dumps are used or not
+                               // Do the final CRIU dump. This needs no special handling whether
+                               // pre-dumps are used or not.
                                dumpSuccess <- ct.Migrate(&criuMigrationArgs)
                                os.RemoveAll(checkpointDir)
                        }()
 
                        select {
-                       /* the checkpoint failed, let's just abort */
+                       // The checkpoint failed, let's just abort.
                        case err = <-dumpSuccess:
                                return abort(err)
-                       /* the dump finished, let's continue on to the restore */
+                       // The dump finished, let's continue on to the restore.
                        case <-dumpDone:
                                logger.Debugf("Dump finished, continuing with restore...")
                        }
@@ -673,13 +684,11 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                        }
                }
 
-               /*
-                * We do the serially right now, but there's really no reason for us
-                * to; since we have separate websockets, we can do it in parallel if
-                * we wanted to. However, assuming we're network bound, there's really
-                * no reason to do these in parallel. In the future when we're using
-                * p.haul's protocol, it will make sense to do these in parallel.
-                */
+               // We do the transfer serially right now, but there's really no reason for us to;
+               // since we have separate websockets, we can do it in parallel if we wanted to.
+               // However, assuming we're network bound, there's really no reason to do these in
+               // parallel. In the future when we're using p.haul's protocol, it will make sense
+               // to do these in parallel.
                ctName, _, _ := shared.InstanceGetParentAndSnapshotName(s.instance.Name())
                state := s.instance.DaemonState()
                err = rsync.Send(ctName, shared.AddSlash(checkpointDir), &shared.WebsocketIO{Conn: s.criuConn}, nil, rsyncFeatures, bwlimit, state.OS.ExecPath)
@@ -688,14 +697,16 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                }
        }
 
-       if s.live || (header.Criu != nil && *header.Criu == migration.CRIUType_NONE) {
-               err = driver.SendAfterCheckpoint(s.fsConn, bwlimit)
-               if err != nil {
-                       return abort(err)
+       if pool == nil {
+               if s.live || (respHeader.Criu != nil && *respHeader.Criu == migration.CRIUType_NONE) {
+                       err = legacyDriver.SendAfterCheckpoint(s.fsConn, bwlimit)
+                       if err != nil {
+                               return abort(err)
+                       }
                }
-       }
 
-       driver.Cleanup()
+               legacyDriver.Cleanup()
+       }
 
        msg := migration.MigrationControl{}
        err = s.recv(&msg)
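
The offer/response split this patch introduces can be sketched in isolation; the MigrationHeader struct below is a hypothetical stand-in for the real protobuf message, showing why keeping both headers visible makes the fallback comparison (*offerHeader.Fs != *respHeader.Fs) straightforward instead of mutating one shared header in place.

package main

import "fmt"

// Hypothetical stand-in for the migration.MigrationHeader protobuf.
type MigrationHeader struct {
        Fs      string
        Predump bool
}

func main() {
        // The source builds an offer from its own capabilities...
        offerHeader := MigrationHeader{Fs: "zfs", Predump: true}

        // ...and the target's reply lands in its own variable
        // (hard-coded here) instead of overwriting the offer.
        respHeader := MigrationHeader{Fs: "rsync", Predump: false}

        // With both sides still visible, negotiation checks compare
        // them directly.
        if respHeader.Fs != offerHeader.Fs {
                fmt.Println("falling back to", respHeader.Fs)
        }
        if !respHeader.Predump {
                fmt.Println("target does not support pre-dumps")
        }
}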