The following pull request was submitted through Github. It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/8249
This e-mail was sent by the LXC bot; direct replies will not reach the author unless they happen to be subscribed to this list. === Description (from pull-request) === Modified the following four files: lxd/storage/backend_lxd.go, shared/api/instance.go, lxc/move.go, lxd/instance_post.go. Attempted to address the specifications below: Have the MoveInstance function use MigrateInstance and CreateInstanceFromMigration to move the data across, update the DB record as needed, and finally delete the source from its storage pool. Extend api.InstancePost to add a Pool string entry that the client would use to trigger such a server-side move. Update lxc/move.go to use MigrateContainer in the client codebase, combined with that new Pool field, to trigger it. Update lxd/instance_post.go on the server side to detect that Pool field and trigger the new MoveInstance logic.
From fec87d5a2386c9d7c01754f53edb420eab2d3d25 Mon Sep 17 00:00:00 2001 From: Pranav Varanasi <varanasipra...@gmail.com> Date: Fri, 11 Dec 2020 22:08:47 -0800 Subject: [PATCH 1/2] vpranav5: Version 2, Issue #7274 Proper Support For Moving Between Pools --- lxd/instance_post.go | 13 +++ lxd/storage/backend_lxd.go | 211 +++++++++++++++++++++++++++++++++++++ 2 files changed, 224 insertions(+) diff --git a/lxd/instance_post.go b/lxd/instance_post.go index 32bb5c59e9..a4252615b4 100644 --- a/lxd/instance_post.go +++ b/lxd/instance_post.go @@ -425,6 +425,19 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new return response.InternalError(err) } + + // Call MoveInstance function to trigger MoveInstance logic on the server side + op, err := dest.MoveInstance(destName, instancePost) + if err != nil { + return errors.Wrap(err, "Failed to issue move instance API request") + } + + err = op.Wait() + if err != nil { + return errors.Wrap(err, "Move instance operation failed") + } + + return operations.OperationResponse(op) } diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 048d6728bd..77a0f60288 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -3791,3 +3791,214 @@ func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData revert.Success() return nil } + + + + + + + +// MoveInstance +func (b *lxdBackend) MoveInstance(inst instance.Instance, src instance.Instance, op *operations.Operation) error { + + logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name(), "snapshots": snapshots}) + logger.Debug("MoveInstance started") + defer logger.Debug("MoveInstance finished") + + if b.Status() == api.StoragePoolStatusPending { + return fmt.Errorf("Specified pool is not fully created") + } + + if inst.Type() != src.Type() { + return fmt.Errorf("Instance types must match") + } + + if src.Type() == instancetype.VM && 
src.IsRunning() { + return errors.Wrap(ErrNotImplemented, "Unable to perform VM live migration") + } + + volType, err := InstanceTypeToVolumeType(inst.Type()) + if err != nil { + return err + } + + volDBType, err := VolumeTypeToDBType(volType) + if err != nil { + return err + } + + contentType := InstanceContentType(inst) + + // Get the root disk device config. + rootDiskConf, err := b.instanceRootVolumeConfig(inst) + if err != nil { + return err + } + + // b is the target storage pool to move to + + // Get the volume name on storage. + volStorageName := project.Instance(inst.Project(), inst.Name()) + + // Initialise a new volume containing the root disk config supplied in the new instance. + vol := b.newVolume(volType, contentType, volStorageName, rootDiskConf) + + if b.driver.HasVolume(vol) { + return fmt.Errorf("Cannot create volume, already exists on target") + } + + // Get the src volume name on storage. + srcVolStorageName := project.Instance(src.Project(), src.Name()) + + // We don't need to use the source instance's root disk config, so set to nil. + srcVol := b.newVolume(volType, contentType, srcVolStorageName, nil) + + revert := revert.New() + defer revert.Fail() + + srcPool, err := GetPoolByInstance(b.state, src) + if err != nil { + return err + } + + // Some driver backing stores require that running instances be frozen during copy. + if !src.IsSnapshot() && b.driver.Info().RunningCopyFreeze && src.IsRunning() && !src.IsFrozen() { + err = src.Freeze() + if err != nil { + return err + } + + defer src.Unfreeze() + } + + revert.Add(func() { b.DeleteInstance(inst, op) }) + + if b.Name() == srcPool.Name() { + logger.Debug("MoveInstance same-pool mode detected") + err = b.driver.CreateVolumeFromCopy(vol, srcVol, snapshots, op) + if err != nil { + return err + } + } else { + // We are copying volumes between storage pools so use migration system as it will + // be able to negotiate a common transfer method between pool types. 
+ logger.Debug("MoveInstance cross-pool mode detected") + + // If we are copying snapshots, retrieve a list of snapshots from source volume. + snapshotNames := []string{} + if snapshots { + snapshots, err := VolumeSnapshotsGet(b.state, src.Project(), srcPool.Name(), src.Name(), volDBType) + if err != nil { + return err + } + + for _, snapshot := range snapshots { + _, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name) + snapshotNames = append(snapshotNames, snapShotName) + } + } + + // Negotiate the migration type to use. + offeredTypes := srcPool.MigrationTypes(contentType, false) + offerHeader := migration.TypesToHeader(offeredTypes...) + migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, false)) + if err != nil { + return fmt.Errorf("Failed to negotiate copy migration type: %v", err) + } + + var srcVolumeSize int64 + + // For VMs, get source volume size so that target can create the volume the same size. + if src.Type() == instancetype.VM { + srcVolumeSize, err = InstanceDiskBlockSize(srcPool, src, op) + if err != nil { + return errors.Wrapf(err, "Failed getting source disk size") + } + } + + ctx, cancel := context.WithCancel(context.Background()) + + // Use in-memory pipe pair to simulate a connection between the sender and receiver. + aEnd, bEnd := memorypipe.NewPipePair(ctx) + + // Run sender and receiver in separate go routines to prevent deadlocks. + aEndErrCh := make(chan error, 1) + bEndErrCh := make(chan error, 1) + go func() { + err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{ + Name: src.Name(), + Snapshots: snapshotNames, + MigrationType: migrationTypes[0], + TrackProgress: true, // Do use a progress tracker on sender. 
+ }, op) + + if err != nil { + cancel() + } + aEndErrCh <- err + }() + + go func() { + err := b.CreateInstanceFromMigration(inst, bEnd, migration.VolumeTargetArgs{ + Name: inst.Name(), + Snapshots: snapshotNames, + MigrationType: migrationTypes[0], + VolumeSize: srcVolumeSize, // Block size setting override. + TrackProgress: false, // Do not use a progress tracker on receiver. + }, op) + + if err != nil { + cancel() + } + bEndErrCh <- err + }() + + // Capture errors from the sender and receiver from their result channels. + errs := []error{} + aEndErr := <-aEndErrCh + if aEndErr != nil { + errs = append(errs, aEndErr) + } + + bEndErr := <-bEndErrCh + if bEndErr != nil { + errs = append(errs, bEndErr) + } + + cancel() + + if len(errs) > 0 { + return fmt.Errorf("Create instance volume from copy failed: %v", errs) + } + + + // Update the db record and delete reference to src stroage pool + + // add new storage pool entry for target + db.UpdateStoragePoolAfterNodeJoin(b, volDBType) + + // delete reference to src storage pool + db.RemoveStoragePool(src.Name()) + + + + } + + err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath()) + if err != nil { + return err + } + + err = inst.DeferTemplateApply("copy") + if err != nil { + return err + } + + revert.Success() + return nil +} + + + + + From 920e65d1e5ff994d64b23f8665ebd7e41f6d9230 Mon Sep 17 00:00:00 2001 From: Pranav Varanasi <varanasipra...@gmail.com> Date: Fri, 11 Dec 2020 22:11:21 -0800 Subject: [PATCH 2/2] vpranav5: version 3, Issue #7274 Proper Support For Moving Between Pools --- lxc/move.go | 18 ++++++++++++++++++ shared/api/instance.go | 2 ++ 2 files changed, 20 insertions(+) diff --git a/lxc/move.go b/lxc/move.go index f77bf6f222..b6fb313bac 100644 --- a/lxc/move.go +++ b/lxc/move.go @@ -254,6 +254,24 @@ func moveClusterInstance(conf *config.Config, sourceResource, destResource, targ if err != nil { return errors.Wrap(err, i18n.G("Migration operation failure")) } + + + var 
poolString string + + // get Pool string entry to be used with MigrateContainer + req := api.InstancePost{Name: destName, Migration: true, PoolValue: poolString} + + // argument to migrate container should be of api.ContainerPost but how to use poolString + op, err := source.MigrateContainer(sourceName, req) + if err != nil { + return errors.Wrap(err, i18n.G("Migrate Container API failure")) + } + + err = op.Wait() + if err != nil { + return errors.Wrap(err, i18n.G("Migrate Container operation failure")) + } + return nil } diff --git a/shared/api/instance.go b/shared/api/instance.go index cec6b4a258..3e18056818 100644 --- a/shared/api/instance.go +++ b/shared/api/instance.go @@ -38,8 +38,10 @@ type InstancePost struct { InstanceOnly bool `json:"instance_only" yaml:"instance_only"` ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly. Target *InstancePostTarget `json:"target" yaml:"target"` + PoolValue string `json:"pool_value" yaml:"pool_value"` } + // InstancePostTarget represents the migration target host and operation. // // API extension: instances
_______________________________________________ lxc-devel mailing list lxc-devel@lists.linuxcontainers.org http://lists.linuxcontainers.org/listinfo/lxc-devel