The following pull request was submitted through GitHub. It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6211
This e-mail was sent by the LXC bot; direct replies will not reach the author unless they happen to be subscribed to this list. === Description (from pull-request) === Renames containerLoadByID and containerLoadByProjectAndName to their instance equivalents and changes their return type. In cases where the `container` type is still needed, a type assertion is performed; if the type is not container, either an error is returned or a step is skipped, depending on the scenario.
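For readers unfamiliar with the pattern described above, here is a minimal, self-contained Go sketch of loading an instance and asserting down to a container-only type. The Instance/container names mirror the LXD code, but the types below are simplified stand-ins for illustration, not the real LXD definitions (the actual code also checks inst.Type() against instance.TypeContainer, as the patches below show):

package main

import (
	"errors"
	"fmt"
)

// Instance is a stand-in for LXD's instance interface; the real
// definition lives in the lxd package and is far more extensive.
type Instance interface {
	Name() string
}

// container is a hypothetical concrete type implementing Instance,
// with one container-only method.
type container struct {
	name string
}

func (c *container) Name() string { return c.name }

// OnStart is a container-only hook in this sketch.
func (c *container) OnStart() error {
	fmt.Println("start hook for", c.name)
	return nil
}

// runStartHook shows the pattern the PR describes: callers receive an
// Instance, then type-assert to container when a container-only method
// is needed, returning an error (or skipping) otherwise.
func runStartHook(inst Instance) error {
	c, ok := inst.(*container)
	if !ok {
		// Not a container: error out, or skip, depending on the caller.
		return errors.New("Instance is not container type")
	}

	return c.OnStart()
}

func main() {
	var inst Instance = &container{name: "c1"}
	if err := runStartHook(inst); err != nil {
		fmt.Println(err)
	}
}
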
From 45586748425372fd1c263ffd82c06d29f7d20be0 Mon Sep 17 00:00:00 2001 From: Thomas Parrott <thomas.parr...@canonical.com> Date: Mon, 16 Sep 2019 13:21:22 +0100 Subject: [PATCH 1/3] lxd/backup: Changes container field to instance type Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com> --- lxd/backup.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/lxd/backup.go b/lxd/backup.go index caa498a9c2..f60d354aee 100644 --- a/lxd/backup.go +++ b/lxd/backup.go @@ -32,8 +32,8 @@ func backupLoadByName(s *state.State, project, name string) (*backup, error) { return nil, errors.Wrap(err, "Load backup from database") } - // Load the container it belongs to - c, err := containerLoadById(s, args.ContainerID) + // Load the instance it belongs to + instance, err := instanceLoadById(s, args.ContainerID) if err != nil { return nil, errors.Wrap(err, "Load container from database") } @@ -41,7 +41,7 @@ func backupLoadByName(s *state.State, project, name string) (*backup, error) { // Return the backup struct return &backup{ state: s, - container: c, + instance: instance, id: args.ID, name: name, creationDate: args.CreationDate, @@ -81,8 +81,8 @@ func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer c // backup represents a container backup type backup struct { - state *state.State - container container + state *state.State + instance Instance // Properties id int @@ -109,7 +109,7 @@ func (b *backup) Rename(newName string) error { newBackupPath := shared.VarPath("backups", newName) // Create the new backup path - backupsPath := shared.VarPath("backups", b.container.Name()) + backupsPath := shared.VarPath("backups", b.instance.Name()) if !shared.PathExists(backupsPath) { err := os.MkdirAll(backupsPath, 0700) if err != nil { @@ -141,9 +141,9 @@ func (b *backup) Rename(newName string) error { return nil } -// Delete removes a container backup +// Delete removes an instance backup func (b *backup) Delete() error { - return doBackupDelete(b.state, b.name, b.container.Name()) + return doBackupDelete(b.state, b.name, b.instance.Name()) } func (b *backup) Render() *api.InstanceBackup { @@ -322,24 +322,22 @@ func backupFixStoragePool(c *db.Cluster, b backupInfo, useDefaultPool bool) erro } func backupCreateTarball(s *state.State, path string, backup backup) error { - container := backup.container - // Create the index - pool, err := container.StoragePool() + pool, err := backup.instance.StoragePool() if err != nil { return err } indexFile := backupInfo{ - Name: container.Name(), - Backend: container.Storage().GetStorageTypeName(), - Privileged: container.IsPrivileged(), + Name: backup.instance.Name(), + Backend: backup.instance.Storage().GetStorageTypeName(), + Privileged: backup.instance.IsPrivileged(), Pool: pool, Snapshots: []string{}, } if !backup.instanceOnly { - snaps, err := container.Snapshots() + snaps, err := backup.instance.Snapshots() if err != nil { return err } @@ -367,7 +365,7 @@ func backupCreateTarball(s *state.State, path string, backup backup) error { } // Create the target path if needed - backupsPath := shared.VarPath("backups", backup.container.Name()) + backupsPath := shared.VarPath("backups", backup.instance.Name()) if !shared.PathExists(backupsPath) { err := os.MkdirAll(backupsPath, 0700) if err != nil { From ad2d40cdf694edac59047bcfee8378761a8e9701 Mon Sep 17 00:00:00 2001 From: Thomas Parrott <thomas.parr...@canonical.com> Date: Mon, 16 Sep 2019 13:21:46 +0100 Subject: [PATCH 2/3] lxd: Renames 
containerLoadByID to instanceLoadById and returns Instance type Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com> --- lxd/api_internal.go | 21 ++++++++++++++++++--- lxd/container.go | 2 +- lxd/storage_zfs.go | 2 +- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lxd/api_internal.go b/lxd/api_internal.go index 06b05f3f35..e72e64c1af 100644 --- a/lxd/api_internal.go +++ b/lxd/api_internal.go @@ -125,11 +125,16 @@ func internalContainerOnStart(d *Daemon, r *http.Request) Response { return SmartError(err) } - c, err := containerLoadById(d.State(), id) + instance, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } + c, ok := instance.(container) + if !ok { + return SmartError(fmt.Errorf("Instance is not container type")) + } + err = c.OnStart() if err != nil { logger.Error("The start hook failed", log.Ctx{"container": c.Name(), "err": err}) @@ -151,11 +156,16 @@ func internalContainerOnStopNS(d *Daemon, r *http.Request) Response { } netns := queryParam(r, "netns") - c, err := containerLoadById(d.State(), id) + instance, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } + c, ok := instance.(container) + if !ok { + return SmartError(fmt.Errorf("Instance is not container type")) + } + err = c.OnStopNS(target, netns) if err != nil { logger.Error("The stopns hook failed", log.Ctx{"container": c.Name(), "err": err}) @@ -176,11 +186,16 @@ func internalContainerOnStop(d *Daemon, r *http.Request) Response { target = "unknown" } - c, err := containerLoadById(d.State(), id) + instance, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } + c, ok := instance.(container) + if !ok { + return SmartError(fmt.Errorf("Instance is not container type")) + } + err = c.OnStop(target) if err != nil { logger.Error("The stop hook failed", log.Ctx{"container": c.Name(), "err": err}) diff --git a/lxd/container.go b/lxd/container.go index ecae6c8ce4..933db90680 100644 --- a/lxd/container.go +++ b/lxd/container.go @@ -933,7 +933,7 @@ func containerConfigureInternal(c container) error { return nil } -func containerLoadById(s *state.State, id int) (container, error) { +func instanceLoadById(s *state.State, id int) (Instance, error) { // Get the DB record project, name, err := s.Cluster.ContainerProjectAndName(id) if err != nil { diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go index 9c75819e96..6f85037019 100644 --- a/lxd/storage_zfs.go +++ b/lxd/storage_zfs.go @@ -1999,7 +1999,7 @@ func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backu } bwlimit := s.pool.Config["rsync.bwlimit"] - projectName := backup.container.Project() + projectName := backup.instance.Project() // Handle snapshots if !backup.instanceOnly { From 2a989d7ecde85d8eece57728125e3d3eeb792463 Mon Sep 17 00:00:00 2001 From: Thomas Parrott <thomas.parr...@canonical.com> Date: Mon, 16 Sep 2019 15:53:39 +0100 Subject: [PATCH 3/3] lxd: Renames containerLoadByProjectAndName to instanceLoadByProjectAndName - Changes return type from container to Instance Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com> --- lxd/api_internal.go | 18 +++---- lxd/backup.go | 2 +- lxd/container.go | 51 ++++++++++--------- lxd/container_backup.go | 4 +- lxd/container_console.go | 51 ++++++++++++------- lxd/container_delete.go | 2 +- lxd/container_exec.go | 50 ++++++++++--------- lxd/container_file.go | 8 +-- lxd/container_get.go | 2 +- lxd/container_lxc.go | 4 +- lxd/container_metadata.go | 12 ++--- lxd/container_patch.go | 2 +- 
lxd/container_post.go | 24 +++++---- lxd/container_put.go | 6 +-- lxd/container_snapshot.go | 37 +++++++++----- lxd/container_state.go | 4 +- lxd/container_test.go | 2 +- lxd/containers.go | 2 +- lxd/containers_post.go | 20 +++++--- lxd/devlxd.go | 13 ++++- lxd/images.go | 2 +- lxd/main_activateifneeded.go | 2 +- lxd/migrate.go | 10 ++-- lxd/migrate_container.go | 92 ++++++++++++++++++++++------------- lxd/patches.go | 12 ++--- lxd/storage.go | 9 +++- lxd/storage_btrfs.go | 46 +++++++++--------- lxd/storage_ceph.go | 72 +++++++++++++-------------- lxd/storage_dir.go | 6 +-- lxd/storage_lvm.go | 4 +- lxd/storage_lvm_utils.go | 2 +- lxd/storage_migration.go | 77 ++++++++++++++++------------- lxd/storage_migration_ceph.go | 6 +-- lxd/storage_migration_zfs.go | 26 +++++----- lxd/storage_zfs.go | 46 +++++++++--------- 35 files changed, 414 insertions(+), 312 deletions(-) diff --git a/lxd/api_internal.go b/lxd/api_internal.go index e72e64c1af..e6534ed8ef 100644 --- a/lxd/api_internal.go +++ b/lxd/api_internal.go @@ -125,16 +125,16 @@ func internalContainerOnStart(d *Daemon, r *http.Request) Response { return SmartError(err) } - instance, err := instanceLoadById(d.State(), id) + inst, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } - c, ok := instance.(container) - if !ok { + if inst.Type() != instance.TypeContainer { return SmartError(fmt.Errorf("Instance is not container type")) } + c := inst.(container) err = c.OnStart() if err != nil { logger.Error("The start hook failed", log.Ctx{"container": c.Name(), "err": err}) @@ -156,16 +156,16 @@ func internalContainerOnStopNS(d *Daemon, r *http.Request) Response { } netns := queryParam(r, "netns") - instance, err := instanceLoadById(d.State(), id) + inst, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } - c, ok := instance.(container) - if !ok { + if inst.Type() != instance.TypeContainer { return SmartError(fmt.Errorf("Instance is not container type")) } + c := inst.(container) err = c.OnStopNS(target, netns) if err != nil { logger.Error("The stopns hook failed", log.Ctx{"container": c.Name(), "err": err}) @@ -186,16 +186,16 @@ func internalContainerOnStop(d *Daemon, r *http.Request) Response { target = "unknown" } - instance, err := instanceLoadById(d.State(), id) + inst, err := instanceLoadById(d.State(), id) if err != nil { return SmartError(err) } - c, ok := instance.(container) - if !ok { + if inst.Type() != instance.TypeContainer { return SmartError(fmt.Errorf("Instance is not container type")) } + c := inst.(container) err = c.OnStop(target) if err != nil { logger.Error("The stop hook failed", log.Ctx{"container": c.Name(), "err": err}) diff --git a/lxd/backup.go b/lxd/backup.go index f60d354aee..16f6b7c9f6 100644 --- a/lxd/backup.go +++ b/lxd/backup.go @@ -52,7 +52,7 @@ func backupLoadByName(s *state.State, project, name string) (*backup, error) { } // Create a new backup -func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer container) error { +func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer Instance) error { // Create the database entry err := s.Cluster.ContainerBackupCreate(args) if err != nil { diff --git a/lxd/container.go b/lxd/container.go index 933db90680..de03c8e5c6 100644 --- a/lxd/container.go +++ b/lxd/container.go @@ -51,10 +51,10 @@ func init() { return identifiers, nil } - // Expose containerLoadByProjectAndName to the device package converting the response to an InstanceIdentifier. 
+ // Expose instanceLoadByProjectAndName to the device package converting the response to an InstanceIdentifier. // This is because container types are defined in the main package and are not importable. device.InstanceLoadByProjectAndName = func(s *state.State, project, name string) (device.InstanceIdentifier, error) { - container, err := containerLoadByProjectAndName(s, project, name) + container, err := instanceLoadByProjectAndName(s, project, name) if err != nil { return nil, err } @@ -428,13 +428,13 @@ func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string, tra return c, nil } -func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer container, containerOnly bool, refresh bool) (container, error) { - var ct container +func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer Instance, containerOnly bool, refresh bool) (Instance, error) { + var ct Instance var err error if refresh { // Load the target container - ct, err = containerLoadByProjectAndName(s, args.Project, args.Name) + ct, err = instanceLoadByProjectAndName(s, args.Project, args.Name) if err != nil { refresh = false } @@ -597,11 +597,15 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine return ct, nil } -func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceContainer container) (container, error) { +func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceInstance Instance) (Instance, error) { + if sourceInstance.Type() != instance.TypeContainer { + return nil, fmt.Errorf("Instance not container type") + } + // Deal with state if args.Stateful { - if !sourceContainer.IsRunning() { - return nil, fmt.Errorf("Unable to create a stateful snapshot. The container isn't running") + if !sourceInstance.IsRunning() { + return nil, fmt.Errorf("Unable to create a stateful snapshot. The instance isn't running") } _, err := exec.LookPath("criu") @@ -609,7 +613,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont return nil, fmt.Errorf("Unable to create a stateful snapshot. 
CRIU isn't installed") } - stateDir := sourceContainer.StatePath() + stateDir := sourceInstance.StatePath() err = os.MkdirAll(stateDir, 0700) if err != nil { return nil, err @@ -635,9 +639,10 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont preDumpDir: "", } - err = sourceContainer.Migrate(&criuMigrationArgs) + c := sourceInstance.(container) + err = c.Migrate(&criuMigrationArgs) if err != nil { - os.RemoveAll(sourceContainer.StatePath()) + os.RemoveAll(sourceInstance.StatePath()) return nil, err } } @@ -649,23 +654,23 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont } // Clone the container - err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer) + err = sourceInstance.Storage().ContainerSnapshotCreate(c, sourceInstance) if err != nil { c.Delete() return nil, err } // Attempt to update backup.yaml on container - ourStart, err := sourceContainer.StorageStart() + ourStart, err := sourceInstance.StorageStart() if err != nil { c.Delete() return nil, err } if ourStart { - defer sourceContainer.StorageStop() + defer sourceInstance.StorageStop() } - err = writeBackupFile(sourceContainer) + err = writeBackupFile(sourceInstance) if err != nil { c.Delete() return nil, err @@ -673,11 +678,11 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont // Once we're done, remove the state directory if args.Stateful { - os.RemoveAll(sourceContainer.StatePath()) + os.RemoveAll(sourceInstance.StatePath()) } - eventSendLifecycle(sourceContainer.Project(), "container-snapshot-created", - fmt.Sprintf("/1.0/containers/%s", sourceContainer.Name()), + eventSendLifecycle(sourceInstance.Project(), "container-snapshot-created", + fmt.Sprintf("/1.0/containers/%s", sourceInstance.Name()), map[string]interface{}{ "snapshot_name": args.Name, }) @@ -887,7 +892,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container, return c, nil } -func containerConfigureInternal(c container) error { +func containerConfigureInternal(c Instance) error { // Find the root device _, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices().CloneNative()) if err != nil { @@ -940,10 +945,10 @@ func instanceLoadById(s *state.State, id int) (Instance, error) { return nil, err } - return containerLoadByProjectAndName(s, project, name) + return instanceLoadByProjectAndName(s, project, name) } -func containerLoadByProjectAndName(s *state.State, project, name string) (container, error) { +func instanceLoadByProjectAndName(s *state.State, project, name string) (Instance, error) { // Get the DB record var container *db.Instance err := s.Cluster.Transaction(func(tx *db.ClusterTx) error { @@ -1133,7 +1138,7 @@ func containerLoadAllInternal(cts []db.Instance, s *state.State) ([]container, e return containers, nil } -func containerCompareSnapshots(source Instance, target container) ([]Instance, []Instance, error) { +func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []Instance, error) { // Get the source snapshots sourceSnapshots, err := source.Snapshots() if err != nil { @@ -1407,7 +1412,7 @@ func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots [] return nil } -func containerDetermineNextSnapshotName(d *Daemon, c container, defaultPattern string) (string, error) { +func containerDetermineNextSnapshotName(d *Daemon, c Instance, defaultPattern string) (string, error) { var err error pattern := c.ExpandedConfig()["snapshots.pattern"] diff --git 
a/lxd/container_backup.go b/lxd/container_backup.go index 93de362cd0..040e2f9443 100644 --- a/lxd/container_backup.go +++ b/lxd/container_backup.go @@ -37,7 +37,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) Response { recursion := util.IsRecursionRequest(r) - c, err := containerLoadByProjectAndName(d.State(), project, cname) + c, err := instanceLoadByProjectAndName(d.State(), project, cname) if err != nil { return SmartError(err) } @@ -86,7 +86,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } diff --git a/lxd/container_console.go b/lxd/container_console.go index a637a59ecf..9aa73fea41 100644 --- a/lxd/container_console.go +++ b/lxd/container_console.go @@ -18,6 +18,7 @@ import ( "github.com/lxc/lxd/lxd/cluster" "github.com/lxc/lxd/lxd/db" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" @@ -25,8 +26,8 @@ import ( ) type consoleWs struct { - // container currently worked on - container container + // instance currently worked on + instance Instance // uid to chown pty to rootUid int64 @@ -232,7 +233,7 @@ func (s *consoleWs) Do(op *operation) error { return cmdErr } - consCmd := s.container.Console(slave) + consCmd := s.instance.Console(slave) consCmd.Start() consolePidChan <- consCmd.Process.Pid err = consCmd.Wait() @@ -291,31 +292,35 @@ func containerConsolePost(d *Daemon, r *http.Request) Response { return ForwardedOperationResponse(project, &opAPI) } - c, err := containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } err = fmt.Errorf("Container is not running") - if !c.IsRunning() { + if !inst.IsRunning() { return BadRequest(err) } err = fmt.Errorf("Container is frozen") - if c.IsFrozen() { + if inst.IsFrozen() { return BadRequest(err) } ws := &consoleWs{} ws.fds = map[int]string{} - idmapset, err := c.CurrentIdmap() - if err != nil { - return InternalError(err) - } + // If the type of instance is container, setup the root UID/GID for web socket. 
+ if inst.Type() == instance.TypeContainer { + c := inst.(container) + idmapset, err := c.CurrentIdmap() + if err != nil { + return InternalError(err) + } - if idmapset != nil { - ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) + if idmapset != nil { + ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) + } } ws.conns = map[int]*websocket.Conn{} @@ -330,13 +335,12 @@ func containerConsolePost(d *Daemon, r *http.Request) Response { ws.allConnected = make(chan bool, 1) ws.controlConnected = make(chan bool, 1) - - ws.container = c + ws.instance = inst ws.width = post.Width ws.height = post.Height resources := map[string][]string{} - resources["containers"] = []string{ws.container.Name()} + resources["containers"] = []string{ws.instance.Name()} op, err := operationCreate(d.cluster, project, operationClassWebsocket, db.OperationConsoleShow, resources, ws.Metadata(), ws.Do, nil, ws.Connect) @@ -369,11 +373,16 @@ func containerConsoleLogGet(d *Daemon, r *http.Request) Response { return BadRequest(fmt.Errorf("Querying the console buffer requires liblxc >= 3.0")) } - c, err := containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } + if inst.Type() != instance.TypeContainer { + return SmartError(fmt.Errorf("Instance is not container type")) + } + + c := inst.(container) ent := fileResponseEntry{} if !c.IsRunning() { // Hand back the contents of the console ringbuffer logfile. @@ -418,11 +427,17 @@ func containerConsoleLogDelete(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] project := projectParam(r) - c, err := containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } + if inst.Type() != instance.TypeContainer { + return SmartError(fmt.Errorf("Instance is not container type")) + } + + c := inst.(container) + truncateConsoleLogFile := func(path string) error { // Check that this is a regular file. We don't want to try and unlink // /dev/stderr or /dev/null or something. 
@@ -442,7 +457,7 @@ func containerConsoleLogDelete(d *Daemon, r *http.Request) Response { return os.Truncate(path, 0) } - if !c.IsRunning() { + if !inst.IsRunning() { consoleLogpath := c.ConsoleBufferLogPath() return SmartError(truncateConsoleLogFile(consoleLogpath)) } diff --git a/lxd/container_delete.go b/lxd/container_delete.go index 52b18a69e3..2885588d1e 100644 --- a/lxd/container_delete.go +++ b/lxd/container_delete.go @@ -25,7 +25,7 @@ func containerDelete(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } diff --git a/lxd/container_exec.go b/lxd/container_exec.go index 590fca3891..3c871fed37 100644 --- a/lxd/container_exec.go +++ b/lxd/container_exec.go @@ -19,6 +19,7 @@ import ( "github.com/lxc/lxd/lxd/cluster" "github.com/lxc/lxd/lxd/db" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" log "github.com/lxc/lxd/shared/log15" @@ -28,9 +29,9 @@ import ( ) type execWs struct { - command []string - container container - env map[string]string + command []string + instance Instance + env map[string]string rootUid int64 rootGid int64 @@ -310,7 +311,7 @@ func (s *execWs) Do(op *operation) error { return cmdErr } - cmd, _, attachedPid, err := s.container.Exec(s.command, s.env, stdin, stdout, stderr, false, s.cwd, s.uid, s.gid) + cmd, _, attachedPid, err := s.instance.Exec(s.command, s.env, stdin, stdout, stderr, false, s.cwd, s.uid, s.gid) if err != nil { return err } @@ -377,22 +378,22 @@ func containerExecPost(d *Daemon, r *http.Request) Response { return ForwardedOperationResponse(project, &opAPI) } - c, err := containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } - if !c.IsRunning() { + if !inst.IsRunning() { return BadRequest(fmt.Errorf("Container is not running")) } - if c.IsFrozen() { + if inst.IsFrozen() { return BadRequest(fmt.Errorf("Container is frozen")) } env := map[string]string{} - for k, v := range c.ExpandedConfig() { + for k, v := range inst.ExpandedConfig() { if strings.HasPrefix(k, "environment.") { env[strings.TrimPrefix(k, "environment.")] = v } @@ -408,7 +409,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response { _, ok := env["PATH"] if !ok { env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - if c.FileExists("/snap") == nil { + if inst.FileExists("/snap") == nil { env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"]) } } @@ -438,13 +439,16 @@ func containerExecPost(d *Daemon, r *http.Request) Response { ws := &execWs{} ws.fds = map[int]string{} - idmapset, err := c.CurrentIdmap() - if err != nil { - return InternalError(err) - } + if inst.Type() == instance.TypeContainer { + c := inst.(container) + idmapset, err := c.CurrentIdmap() + if err != nil { + return InternalError(err) + } - if idmapset != nil { - ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) + if idmapset != nil { + ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) + } } ws.conns = map[int]*websocket.Conn{} @@ -465,7 +469,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response { } ws.command = post.Command - ws.container = c + ws.instance = inst ws.env = env ws.width = post.Width @@ -476,7 +480,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response { ws.gid = post.Group resources := map[string][]string{} - 
resources["containers"] = []string{ws.container.Name()} + resources["containers"] = []string{ws.instance.Name()} op, err := operationCreate(d.cluster, project, operationClassWebsocket, db.OperationCommandExec, resources, ws.Metadata(), ws.Do, nil, ws.Connect) if err != nil { @@ -493,29 +497,29 @@ func containerExecPost(d *Daemon, r *http.Request) Response { if post.RecordOutput { // Prepare stdout and stderr recording - stdout, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + stdout, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } defer stdout.Close() - stderr, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + stderr, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } defer stderr.Close() // Run the command - _, cmdResult, _, cmdErr = c.Exec(post.Command, env, nil, stdout, stderr, true, post.Cwd, post.User, post.Group) + _, cmdResult, _, cmdErr = inst.Exec(post.Command, env, nil, stdout, stderr, true, post.Cwd, post.User, post.Group) // Update metadata with the right URLs metadata["return"] = cmdResult metadata["output"] = shared.Jmap{ - "1": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, c.Name(), filepath.Base(stdout.Name())), - "2": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, c.Name(), filepath.Base(stderr.Name())), + "1": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, inst.Name(), filepath.Base(stdout.Name())), + "2": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, inst.Name(), filepath.Base(stderr.Name())), } } else { - _, cmdResult, _, cmdErr = c.Exec(post.Command, env, nil, nil, nil, true, post.Cwd, post.User, post.Group) + _, cmdResult, _, cmdErr = inst.Exec(post.Command, env, nil, nil, nil, true, post.Cwd, post.User, post.Group) metadata["return"] = cmdResult } diff --git a/lxd/container_file.go b/lxd/container_file.go index 0acbb900e8..7946fe4444 100644 --- a/lxd/container_file.go +++ b/lxd/container_file.go @@ -30,7 +30,7 @@ func containerFileHandler(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -52,7 +52,7 @@ func containerFileHandler(d *Daemon, r *http.Request) Response { } } -func containerFileGet(c container, path string, r *http.Request) Response { +func containerFileGet(c Instance, path string, r *http.Request) Response { /* * Copy out of the ns to a temporary file, and then use that to serve * the request from. 
This prevents us from having to worry about stuff @@ -97,7 +97,7 @@ func containerFileGet(c container, path string, r *http.Request) Response { } } -func containerFilePost(c container, path string, r *http.Request) Response { +func containerFilePost(c Instance, path string, r *http.Request) Response { // Extract file ownership and mode from headers uid, gid, mode, type_, write := shared.ParseLXDFileHeaders(r.Header) @@ -150,7 +150,7 @@ func containerFilePost(c container, path string, r *http.Request) Response { } } -func containerFileDelete(c container, path string, r *http.Request) Response { +func containerFileDelete(c Instance, path string, r *http.Request) Response { err := c.FileRemove(path) if err != nil { return SmartError(err) diff --git a/lxd/container_get.go b/lxd/container_get.go index e274b64340..b76a85baa0 100644 --- a/lxd/container_get.go +++ b/lxd/container_get.go @@ -24,7 +24,7 @@ func containerGet(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go index a93d3b6504..e48c4f1a86 100644 --- a/lxd/container_lxc.go +++ b/lxd/container_lxc.go @@ -4006,7 +4006,7 @@ type backupFile struct { Volume *api.StorageVolume `yaml:"volume"` } -func writeBackupFile(c container) error { +func writeBackupFile(c Instance) error { // We only write backup files out for actual containers if c.IsSnapshot() { return nil @@ -4971,7 +4971,7 @@ func (c *containerLXC) Export(w io.Writer, properties map[string]string) error { var arch string if c.IsSnapshot() { parentName, _, _ := shared.ContainerGetParentAndSnapshotName(c.name) - parent, err := containerLoadByProjectAndName(c.state, c.project, parentName) + parent, err := instanceLoadByProjectAndName(c.state, c.project, parentName) if err != nil { ctw.Close() logger.Error("Failed exporting container", ctxMap) diff --git a/lxd/container_metadata.go b/lxd/container_metadata.go index 5ce2e2259f..bd4bdc712f 100644 --- a/lxd/container_metadata.go +++ b/lxd/container_metadata.go @@ -36,7 +36,7 @@ func containerMetadataGet(d *Daemon, r *http.Request) Response { } // Load the container - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -97,7 +97,7 @@ func containerMetadataPut(d *Daemon, r *http.Request) Response { } // Load the container - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -151,7 +151,7 @@ func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response { } // Load the container - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -245,7 +245,7 @@ func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response { } // Load the container - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -318,7 +318,7 @@ func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response { } // Load the container - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := 
instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -357,7 +357,7 @@ func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response { } // Return the full path of a container template. -func getContainerTemplatePath(c container, filename string) (string, error) { +func getContainerTemplatePath(c Instance, filename string) (string, error) { if strings.Contains(filename, "/") { return "", fmt.Errorf("Invalid template filename") } diff --git a/lxd/container_patch.go b/lxd/container_patch.go index 25e50f8388..16b8bf3743 100644 --- a/lxd/container_patch.go +++ b/lxd/container_patch.go @@ -37,7 +37,7 @@ func containerPatch(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return NotFound(err) } diff --git a/lxd/container_post.go b/lxd/container_post.go index e428b0d098..da7e960938 100644 --- a/lxd/container_post.go +++ b/lxd/container_post.go @@ -102,7 +102,7 @@ func containerPost(d *Daemon, r *http.Request) Response { return BadRequest(fmt.Errorf("Target node is offline")) } - var c container + var inst Instance // Check whether to forward the request to the node that is running the // container. Here are the possible cases: @@ -135,7 +135,7 @@ func containerPost(d *Daemon, r *http.Request) Response { return response } - c, err = containerLoadByProjectAndName(d.State(), project, name) + inst, err = instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -171,7 +171,7 @@ func containerPost(d *Daemon, r *http.Request) Response { if req.Migration { if targetNode != "" { // Check whether the container is running. - if c != nil && c.IsRunning() { + if inst != nil && inst.IsRunning() { return BadRequest(fmt.Errorf("Container is running")) } @@ -187,7 +187,7 @@ func containerPost(d *Daemon, r *http.Request) Response { return SmartError(err) } if pool.Driver == "ceph" { - return containerPostClusteringMigrateWithCeph(d, c, project, name, req.Name, targetNode, instanceType) + return containerPostClusteringMigrateWithCeph(d, inst, project, name, req.Name, targetNode, instanceType) } // If this is not a ceph-based container, make sure @@ -199,10 +199,16 @@ func containerPost(d *Daemon, r *http.Request) Response { return SmartError(err) } - return containerPostClusteringMigrate(d, c, name, req.Name, targetNode) + return containerPostClusteringMigrate(d, inst, name, req.Name, targetNode) } instanceOnly := req.InstanceOnly || req.ContainerOnly + + if inst.Type() != instance.TypeContainer { + return SmartError(fmt.Errorf("Instance is not container type")) + } + + c := inst.(container) ws, err := NewMigrationSource(c, stateful, instanceOnly) if err != nil { return InternalError(err) @@ -242,7 +248,7 @@ func containerPost(d *Daemon, r *http.Request) Response { } run := func(*operation) error { - return c.Rename(req.Name) + return inst.Rename(req.Name) } resources := map[string][]string{} @@ -257,7 +263,7 @@ func containerPost(d *Daemon, r *http.Request) Response { } // Move a non-ceph container to another cluster node. 
-func containerPostClusteringMigrate(d *Daemon, c container, oldName, newName, newNode string) Response { +func containerPostClusteringMigrate(d *Daemon, c Instance, oldName, newName, newNode string) Response { cert := d.endpoints.NetworkCert() var sourceAddress string @@ -400,7 +406,7 @@ func containerPostClusteringMigrate(d *Daemon, c container, oldName, newName, ne } // Special case migrating a container backed by ceph across two cluster nodes. -func containerPostClusteringMigrateWithCeph(d *Daemon, c container, project, oldName, newName, newNode string, instanceType instance.Type) Response { +func containerPostClusteringMigrateWithCeph(d *Daemon, c Instance, project, oldName, newName, newNode string, instanceType instance.Type) Response { run := func(*operation) error { // If source node is online (i.e. we're serving the request on // it, and c != nil), let's unmap the RBD volume locally @@ -524,7 +530,7 @@ func internalClusterContainerMovedPost(d *Daemon, r *http.Request) Response { // Used after to create the appropriate mounts point after a container has been // moved. func containerPostCreateContainerMountPoint(d *Daemon, project, containerName string) error { - c, err := containerLoadByProjectAndName(d.State(), project, containerName) + c, err := instanceLoadByProjectAndName(d.State(), project, containerName) if err != nil { return errors.Wrap(err, "Failed to load moved container on target node") } diff --git a/lxd/container_put.go b/lxd/container_put.go index f3414b2e6e..93eb4132ca 100644 --- a/lxd/container_put.go +++ b/lxd/container_put.go @@ -40,7 +40,7 @@ func containerPut(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return NotFound(err) } @@ -113,12 +113,12 @@ func containerSnapRestore(s *state.State, project, name, snap string, stateful b snap = name + shared.SnapshotDelimiter + snap } - c, err := containerLoadByProjectAndName(s, project, name) + c, err := instanceLoadByProjectAndName(s, project, name) if err != nil { return err } - source, err := containerLoadByProjectAndName(s, project, snap) + source, err := instanceLoadByProjectAndName(s, project, snap) if err != nil { switch err { case db.ErrNoSuchObject: diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go index 86a383de40..51629b4d51 100644 --- a/lxd/container_snapshot.go +++ b/lxd/container_snapshot.go @@ -13,6 +13,7 @@ import ( "github.com/gorilla/mux" "github.com/lxc/lxd/lxd/db" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" @@ -58,7 +59,7 @@ func containerSnapshotsGet(d *Daemon, r *http.Request) Response { } } } else { - c, err := containerLoadByProjectAndName(d.State(), project, cname) + c, err := instanceLoadByProjectAndName(d.State(), project, cname) if err != nil { return SmartError(err) } @@ -109,7 +110,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response { * 2. copy the database info over * 3. 
copy over the rootfs */ - c, err := containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -120,7 +121,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response { } if req.Name == "" { - req.Name, err = containerDetermineNextSnapshotName(d, c, "snap%d") + req.Name, err = containerDetermineNextSnapshotName(d, inst, "snap%d") if err != nil { return SmartError(err) } @@ -139,7 +140,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response { if req.ExpiresAt != nil { expiry = *req.ExpiresAt } else { - expiry, err = shared.GetSnapshotExpiry(time.Now(), c.LocalConfig()["snapshots.expiry"]) + expiry, err = shared.GetSnapshotExpiry(time.Now(), inst.LocalConfig()["snapshots.expiry"]) if err != nil { return BadRequest(err) } @@ -147,19 +148,25 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response { snapshot := func(op *operation) error { args := db.ContainerArgs{ - Project: c.Project(), - Architecture: c.Architecture(), - Config: c.LocalConfig(), - Type: c.Type(), + Project: inst.Project(), + Architecture: inst.Architecture(), + Config: inst.LocalConfig(), + Type: inst.Type(), Snapshot: true, - Devices: c.LocalDevices(), - Ephemeral: c.IsEphemeral(), + Devices: inst.LocalDevices(), + Ephemeral: inst.IsEphemeral(), Name: fullName, - Profiles: c.Profiles(), + Profiles: inst.Profiles(), Stateful: req.Stateful, ExpiryDate: expiry, } + if inst.Type() != instance.TypeContainer { + return fmt.Errorf("Instance is not container type") + } + + c := inst.(container) + _, err := containerCreateAsSnapshot(d.State(), args, c) if err != nil { return err @@ -201,7 +208,7 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) Response { if err != nil { return SmartError(err) } - sc, err := containerLoadByProjectAndName( + inst, err := instanceLoadByProjectAndName( d.State(), project, containerName+ shared.SnapshotDelimiter+ @@ -210,6 +217,12 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) Response { return SmartError(err) } + if inst.Type() != instance.TypeContainer { + return SmartError(fmt.Errorf("Instance not container type")) + } + + sc := inst.(container) + switch r.Method { case "GET": return snapshotGet(sc, snapshotName) diff --git a/lxd/container_state.go b/lxd/container_state.go index d2e13a0672..b5198977ca 100644 --- a/lxd/container_state.go +++ b/lxd/container_state.go @@ -31,7 +31,7 @@ func containerState(d *Daemon, r *http.Request) Response { return response } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } @@ -74,7 +74,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response { // Don't mess with containers while in setup mode <-d.readyChan - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return SmartError(err) } diff --git a/lxd/container_test.go b/lxd/container_test.go index 878b88b24c..966fdc4659 100644 --- a/lxd/container_test.go +++ b/lxd/container_test.go @@ -133,7 +133,7 @@ func (suite *containerTestSuite) TestContainer_LoadFromDB() { defer c.Delete() // Load the container and trigger initLXC() - c2, err := containerLoadByProjectAndName(suite.d.State(), "default", "testFoo") + c2, err := instanceLoadByProjectAndName(suite.d.State(), "default", "testFoo") c2.IsRunning() suite.Req.Nil(err) _, err 
= c2.StorageStart() diff --git a/lxd/containers.go b/lxd/containers.go index 2bbb0897c1..8272242d6b 100644 --- a/lxd/containers.go +++ b/lxd/containers.go @@ -358,7 +358,7 @@ func containerDeleteSnapshots(s *state.State, project, cname string) error { } for _, sname := range results { - sc, err := containerLoadByProjectAndName(s, project, sname) + sc, err := instanceLoadByProjectAndName(s, project, sname) if err != nil { logger.Error( "containerDeleteSnapshots: Failed to load the snapshot container", diff --git a/lxd/containers_post.go b/lxd/containers_post.go index 753db8b259..822af02534 100644 --- a/lxd/containers_post.go +++ b/lxd/containers_post.go @@ -285,12 +285,18 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp // Early check for refresh if req.Source.Refresh { // Check if the container exists - c, err = containerLoadByProjectAndName(d.State(), project, req.Name) + inst, err := instanceLoadByProjectAndName(d.State(), project, req.Name) if err != nil { req.Source.Refresh = false - } else if c.IsRunning() { + } else if inst.IsRunning() { return BadRequest(fmt.Errorf("Cannot refresh a running container")) } + + if inst.Type() != instance.TypeContainer { + return BadRequest(fmt.Errorf("Instance type not container")) + } + + c = inst.(container) } if !req.Source.Refresh { @@ -388,7 +394,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp Dialer: websocket.Dialer{ TLSClientConfig: config, NetDial: shared.RFC3493Dialer}, - Container: c, + Instance: c, Secrets: req.Source.Websockets, Push: push, Live: req.Source.Live, @@ -454,7 +460,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response } targetProject := project - source, err := containerLoadByProjectAndName(d.State(), sourceProject, req.Source.Source) + source, err := instanceLoadByProjectAndName(d.State(), sourceProject, req.Source.Source) if err != nil { return SmartError(err) } @@ -559,7 +565,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response // Early check for refresh if req.Source.Refresh { // Check if the container exists - c, err := containerLoadByProjectAndName(d.State(), targetProject, req.Name) + c, err := instanceLoadByProjectAndName(d.State(), targetProject, req.Name) if err != nil { req.Source.Refresh = false } else if c.IsRunning() { @@ -670,7 +676,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re return fmt.Errorf("Internal import request: %v", resp.String()) } - c, err := containerLoadByProjectAndName(d.State(), project, bInfo.Name) + c, err := instanceLoadByProjectAndName(d.State(), project, bInfo.Name) if err != nil { return errors.Wrap(err, "Load container") } @@ -881,7 +887,7 @@ func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost) return storagePool, storagePoolProfile, localRootDiskDeviceKey, localRootDiskDevice, nil } -func clusterCopyContainerInternal(d *Daemon, source container, project string, req *api.InstancesPost) Response { +func clusterCopyContainerInternal(d *Daemon, source Instance, project string, req *api.InstancesPost) Response { name := req.Source.Source // Locate the source of the container diff --git a/lxd/devlxd.go b/lxd/devlxd.go index a15fd2306c..3c76c0b030 100644 --- a/lxd/devlxd.go +++ b/lxd/devlxd.go @@ -19,6 +19,7 @@ import ( "github.com/gorilla/websocket" "github.com/pborman/uuid" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" 
"github.com/lxc/lxd/shared/logger" @@ -444,7 +445,17 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) { project = strings.Split(name, "_")[0] } - return containerLoadByProjectAndName(d.State(), project, name) + inst, err := instanceLoadByProjectAndName(d.State(), project, name) + if err != nil { + return nil, err + } + + if inst.Type() != instance.TypeContainer { + return nil, fmt.Errorf("Instance is not container type") + } + + c := inst.(container) + return c, nil } status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid)) diff --git a/lxd/images.go b/lxd/images.go index e15cefbfb6..330c6afbd3 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -186,7 +186,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operati info.Public = false } - c, err := containerLoadByProjectAndName(d.State(), project, name) + c, err := instanceLoadByProjectAndName(d.State(), project, name) if err != nil { return nil, err } diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go index 16196ef827..63f0d7ab4a 100644 --- a/lxd/main_activateifneeded.go +++ b/lxd/main_activateifneeded.go @@ -122,7 +122,7 @@ func (c *cmdActivateifneeded) Run(cmd *cobra.Command, args []string) error { } for _, container := range containers { - c, err := containerLoadByProjectAndName(d.State(), container.Project, container.Name) + c, err := instanceLoadByProjectAndName(d.State(), container.Project, container.Name) if err != nil { sqldb.Close() return err diff --git a/lxd/migrate.go b/lxd/migrate.go index 109b57bb94..6f5ead8459 100644 --- a/lxd/migrate.go +++ b/lxd/migrate.go @@ -38,7 +38,7 @@ type migrationFields struct { // container specific fields live bool instanceOnly bool - container container + instance Instance // storage specific fields storage storage @@ -261,8 +261,8 @@ type MigrationSinkArgs struct { Secrets map[string]string Url string - // Container specific fields - Container container + // Instance specific fields + Instance Instance InstanceOnly bool Idmap *idmap.IdmapSet Live bool @@ -278,8 +278,8 @@ type MigrationSinkArgs struct { } type MigrationSourceArgs struct { - // Container specific fields - Container container + // Instance specific fields + Instance Instance InstanceOnly bool // Transport specific fields diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go index 188612b98e..4d893b87c7 100644 --- a/lxd/migrate_container.go +++ b/lxd/migrate_container.go @@ -16,6 +16,7 @@ import ( "gopkg.in/lxc/go-lxc.v2" "github.com/lxc/lxd/lxd/db" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/migration" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" @@ -24,8 +25,8 @@ import ( "github.com/lxc/lxd/shared/logger" ) -func NewMigrationSource(c container, stateful bool, instanceOnly bool) (*migrationSourceWs, error) { - ret := migrationSourceWs{migrationFields{container: c}, make(chan bool, 1)} +func NewMigrationSource(inst Instance, stateful bool, instanceOnly bool) (*migrationSourceWs, error) { + ret := migrationSourceWs{migrationFields{instance: inst}, make(chan bool, 1)} ret.instanceOnly = instanceOnly var err error @@ -39,7 +40,7 @@ func NewMigrationSource(c container, stateful bool, instanceOnly bool) (*migrati return nil, err } - if stateful && c.IsRunning() { + if stateful && inst.IsRunning() { _, err := exec.LookPath("criu") if err != nil { return nil, fmt.Errorf("Unable to perform container live migration. 
CRIU isn't installed on the source server") @@ -133,7 +134,14 @@ func (s *migrationSourceWs) checkForPreDumpSupport() (bool, int) { preDumpDir: "", features: lxc.FEATURE_MEM_TRACK, } - err := s.container.Migrate(&criuMigrationArgs) + + if s.instance.Type() != instance.TypeContainer { + return false, 0 + } + + c := s.instance.(container) + + err := c.Migrate(&criuMigrationArgs) if err != nil { // CRIU says it does not know about dirty memory tracking. @@ -146,7 +154,7 @@ func (s *migrationSourceWs) checkForPreDumpSupport() (bool, int) { use_pre_dumps := true // What does the configuration say about pre-copy - tmp := s.container.ExpandedConfig()["migration.incremental.memory"] + tmp := s.instance.ExpandedConfig()["migration.incremental.memory"] if tmp != "" { use_pre_dumps = shared.IsTrue(tmp) @@ -157,7 +165,7 @@ func (s *migrationSourceWs) checkForPreDumpSupport() (bool, int) { // migration.incremental.memory.iterations is the value after which the // container will be definitely migrated, even if the remaining number // of memory pages is below the defined threshold. - tmp = s.container.ExpandedConfig()["migration.incremental.memory.iterations"] + tmp = s.instance.ExpandedConfig()["migration.incremental.memory.iterations"] if tmp != "" { max_iterations, _ = strconv.Atoi(tmp) } else { @@ -243,14 +251,20 @@ func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) { final := args.final - err := s.container.Migrate(&criuMigrationArgs) + if s.instance.Type() != instance.TypeContainer { + return false, fmt.Errorf("Instance not container type") + } + + c := s.instance.(container) + + err := c.Migrate(&criuMigrationArgs) if err != nil { return final, err } // Send the pre-dump. - ctName, _, _ := shared.ContainerGetParentAndSnapshotName(s.container.Name()) - state := s.container.DaemonState() + ctName, _, _ := shared.ContainerGetParentAndSnapshotName(s.instance.Name()) + state := s.instance.DaemonState() err = RsyncSend(ctName, shared.AddSlash(args.checkpointDir), s.criuConn, nil, args.rsyncFeatures, args.bwlimit, state.OS.ExecPath) if err != nil { return final, err @@ -276,7 +290,7 @@ func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) { // threshold is the percentage of memory pages that needs // to be pre-copied for the pre-copy migration to stop. var threshold int - tmp := s.container.ExpandedConfig()["migration.incremental.memory.goal"] + tmp := s.instance.ExpandedConfig()["migration.incremental.memory.goal"] if tmp != "" { threshold, _ = strconv.Atoi(tmp) } else { @@ -320,24 +334,30 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { criuType := migration.CRIUType_CRIU_RSYNC.Enum() if !s.live { criuType = nil - if s.container.IsRunning() { + if s.instance.IsRunning() { criuType = migration.CRIUType_NONE.Enum() } } + if s.instance.Type() != instance.TypeContainer { + return fmt.Errorf("Instance not container type") + } + + c := s.instance.(container) + // Storage needs to start unconditionally now, since we need to // initialize a new storage interface. 
- ourStart, err := s.container.StorageStart() + ourStart, err := s.instance.StorageStart() if err != nil { return err } if ourStart { - defer s.container.StorageStop() + defer s.instance.StorageStop() } idmaps := make([]*migration.IDMapType, 0) - idmapset, err := s.container.DiskIdmap() + idmapset, err := c.DiskIdmap() if err != nil { return err } @@ -360,7 +380,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { snapshotNames := []string{} // Only send snapshots when requested. if !s.instanceOnly { - fullSnaps, err := s.container.Snapshots() + fullSnaps, err := s.instance.Snapshots() if err == nil { for _, snap := range fullSnaps { snapshots = append(snapshots, snapshotToProtobuf(snap)) @@ -377,7 +397,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { // The protocol says we have to send a header no matter what, so let's // do that, but then immediately send an error. - myType := s.container.Storage().MigrationType() + myType := s.instance.Storage().MigrationType() hasFeature := true header := migration.MigrationHeader{ Fs: &myType, @@ -425,14 +445,14 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { // Set source args sourceArgs := MigrationSourceArgs{ - Container: s.container, + Instance: s.instance, InstanceOnly: s.instanceOnly, RsyncFeatures: rsyncFeatures, ZfsFeatures: zfsFeatures, } // Initialize storage driver - driver, fsErr := s.container.Storage().MigrationSource(sourceArgs) + driver, fsErr := s.instance.Storage().MigrationSource(sourceArgs) if fsErr != nil { s.sendControl(fsErr) return fsErr @@ -450,7 +470,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { } // Check if this storage pool has a rate limit set for rsync. - poolwritable := s.container.Storage().GetStoragePoolWritable() + poolwritable := s.instance.Storage().GetStoragePoolWritable() if poolwritable.Config != nil { bwlimit = poolwritable.Config["rsync.bwlimit"] } @@ -522,10 +542,10 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { return abort(err) } - state := s.container.DaemonState() + state := s.instance.DaemonState() actionScriptOp, err := operationCreate( state.Cluster, - s.container.Project(), + s.instance.Project(), operationClassWebsocket, db.OperationContainerLiveMigrate, nil, @@ -619,7 +639,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { // Do the final CRIU dump. This is needs no special // handling if pre-dumps are used or not - dumpSuccess <- s.container.Migrate(&criuMigrationArgs) + dumpSuccess <- c.Migrate(&criuMigrationArgs) os.RemoveAll(checkpointDir) }() @@ -644,7 +664,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { preDumpDir: "", } - err = s.container.Migrate(&criuMigrationArgs) + err = c.Migrate(&criuMigrationArgs) if err != nil { return abort(err) } @@ -657,8 +677,8 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { * no reason to do these in parallel. In the future when we're using * p.haul's protocol, it will make sense to do these in parallel. 
*/ - ctName, _, _ := shared.ContainerGetParentAndSnapshotName(s.container.Name()) - state := s.container.DaemonState() + ctName, _, _ := shared.ContainerGetParentAndSnapshotName(s.instance.Name()) + state := s.instance.DaemonState() err = RsyncSend(ctName, shared.AddSlash(checkpointDir), s.criuConn, nil, rsyncFeatures, bwlimit, state.OS.ExecPath) if err != nil { return abort(err) @@ -698,7 +718,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error { func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) { sink := migrationSink{ - src: migrationFields{container: args.Container, instanceOnly: args.InstanceOnly}, + src: migrationFields{instance: args.Instance, instanceOnly: args.InstanceOnly}, dest: migrationFields{instanceOnly: args.InstanceOnly}, url: args.Url, dialer: args.Dialer, @@ -756,6 +776,12 @@ func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) { } func (c *migrationSink) Do(migrateOp *operation) error { + if c.src.instance.Type() != instance.TypeContainer { + return fmt.Errorf("Instance not container type") + } + + ct := c.src.instance.(container) + var err error if c.push { @@ -829,12 +855,12 @@ func (c *migrationSink) Do(migrateOp *operation) error { } } - mySink := c.src.container.Storage().MigrationSink + mySink := c.src.instance.Storage().MigrationSink if c.refresh { mySink = rsyncMigrationSink } - myType := c.src.container.Storage().MigrationType() + myType := c.src.instance.Storage().MigrationType() resp := migration.MigrationHeader{ Fs: &myType, Criu: criuType, @@ -874,7 +900,7 @@ func (c *migrationSink) Do(migrateOp *operation) error { if c.refresh { // Get our existing snapshots - targetSnapshots, err := c.src.container.Snapshots() + targetSnapshots, err := c.src.instance.Snapshots() if err != nil { controller(err) return err @@ -959,7 +985,7 @@ func (c *migrationSink) Do(migrateOp *operation) error { */ if len(header.SnapshotNames) != len(header.Snapshots) { for _, name := range header.SnapshotNames { - base := snapshotToProtobuf(c.src.container) + base := snapshotToProtobuf(c.src.instance) base.Name = &name snapshots = append(snapshots, base) } @@ -984,7 +1010,7 @@ func (c *migrationSink) Do(migrateOp *operation) error { } args := MigrationSinkArgs{ - Container: c.src.container, + Instance: c.src.instance, InstanceOnly: c.src.instanceOnly, Idmap: srcIdmap, Live: sendFinalFsDelta, @@ -999,7 +1025,7 @@ func (c *migrationSink) Do(migrateOp *operation) error { return } - err = resetContainerDiskIdmap(c.src.container, srcIdmap) + err = resetContainerDiskIdmap(ct, srcIdmap) if err != nil { fsTransfer <- err return @@ -1088,7 +1114,7 @@ func (c *migrationSink) Do(migrateOp *operation) error { // Currently we only do a single CRIU pre-dump so we // can hardcode "final" here since we know that "final" is the // folder for CRIU's final dump. - err = c.src.container.Migrate(&criuMigrationArgs) + err = ct.Migrate(&criuMigrationArgs) if err != nil { restore <- err return diff --git a/lxd/patches.go b/lxd/patches.go index 5d9ff5119e..d4b759b2b7 100644 --- a/lxd/patches.go +++ b/lxd/patches.go @@ -1150,7 +1150,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d } // Load the container from the database. 
- ctStruct, err := containerLoadByProjectAndName(d.State(), "default", ct) + ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct) if err != nil { logger.Errorf("Failed to load LVM container %s: %s", ct, err) return err @@ -1303,7 +1303,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d } // Load the snapshot from the database. - csStruct, err := containerLoadByProjectAndName(d.State(), "default", cs) + csStruct, err := instanceLoadByProjectAndName(d.State(), "default", cs) if err != nil { logger.Errorf("Failed to load LVM container %s: %s", cs, err) return err @@ -1878,7 +1878,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [ // Make sure all containers and snapshots have a valid disk configuration for _, ct := range allcontainers { - c, err := containerLoadByProjectAndName(d.State(), "default", ct) + c, err := instanceLoadByProjectAndName(d.State(), "default", ct) if err != nil { continue } @@ -1981,7 +1981,7 @@ func patchContainerConfigRegen(name string, d *Daemon) error { for _, ct := range cts { // Load the container from the database. - c, err := containerLoadByProjectAndName(d.State(), "default", ct) + c, err := instanceLoadByProjectAndName(d.State(), "default", ct) if err != nil { logger.Errorf("Failed to open container '%s': %v", ct, err) continue @@ -2760,7 +2760,7 @@ func patchDevicesNewNamingScheme(name string, d *Daemon) error { } // Load the container from the database. - c, err := containerLoadByProjectAndName(d.State(), "default", ct) + c, err := instanceLoadByProjectAndName(d.State(), "default", ct) if err != nil { logger.Errorf("Failed to load container %s: %s", ct, err) return err @@ -2982,7 +2982,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error { for _, ct := range cRegular { // load the container from the database - ctStruct, err := containerLoadByProjectAndName(d.State(), "default", ct) + ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct) if err != nil { return err } diff --git a/lxd/storage.go b/lxd/storage.go index 9db3c4f91b..b0191ee4aa 100644 --- a/lxd/storage.go +++ b/lxd/storage.go @@ -13,6 +13,7 @@ import ( "github.com/lxc/lxd/lxd/db" "github.com/lxc/lxd/lxd/device" + "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/migration" "github.com/lxc/lxd/lxd/state" driver "github.com/lxc/lxd/lxd/storage" @@ -491,11 +492,17 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str if len(volumeUsedBy) > 1 { for _, ctName := range volumeUsedBy { - ct, err := containerLoadByProjectAndName(s, c.Project(), ctName) + instt, err := instanceLoadByProjectAndName(s, c.Project(), ctName) if err != nil { continue } + if instt.Type() != instance.TypeContainer { + continue + } + + ct := instt.(container) + var ctNextIdmap *idmap.IdmapSet if ct.IsRunning() { ctNextIdmap, err = ct.CurrentIdmap() diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go index 7d93d29d80..a47205b80a 100644 --- a/lxd/storage_btrfs.go +++ b/lxd/storage_btrfs.go @@ -1019,7 +1019,7 @@ func (s *storageBtrfs) copyContainer(target Instance, source Instance) error { return nil } -func (s *storageBtrfs) copySnapshot(target container, source container) error { +func (s *storageBtrfs) copySnapshot(target Instance, source Instance) error { sourceName := source.Name() targetName := target.Name() sourceContainerSubvolumeName := driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, sourceName) @@ -1166,19 +1166,19 @@ func (s *storageBtrfs) 
ContainerCopy(target Instance, source Instance, container } for _, snap := range snapshots { - sourceSnapshot, err := containerLoadByProjectAndName(s.s, source.Project(), snap.Name()) + sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name()) if err != nil { return err } _, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name()) newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName) - targetSnapshot, err := containerLoadByProjectAndName(s.s, target.Project(), newSnapName) + targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName) if err != nil { return err } - err = s.copySnapshot(targetSnapshot, sourceSnapshot) + err = s.copySnapshot(sourceSnapshot, targetSnapshot) if err != nil { return err } @@ -2458,14 +2458,14 @@ func (s *storageBtrfs) MigrationSource(args MigrationSourceArgs) (MigrationStora var err error var snapshots = []Instance{} if !args.InstanceOnly { - snapshots, err = args.Container.Snapshots() + snapshots, err = args.Instance.Snapshots() if err != nil { return nil, err } } sourceDriver := &btrfsMigrationSourceDriver{ - container: args.Container, + container: args.Instance, snapshots: snapshots, btrfsSnapshotNames: []string{}, btrfs: s, @@ -2555,17 +2555,17 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M return nil } - containerName := args.Container.Name() - _, containerPool, _ := args.Container.Storage().GetContainerPoolInfo() - containersPath := driver.GetSnapshotMountPoint(args.Container.Project(), containerPool, containerName) + instanceName := args.Instance.Name() + _, instancePool, _ := args.Instance.Storage().GetContainerPoolInfo() + containersPath := driver.GetSnapshotMountPoint(args.Instance.Project(), instancePool, instanceName) if !args.InstanceOnly && len(args.Snapshots) > 0 { err := os.MkdirAll(containersPath, driver.ContainersDirMode) if err != nil { return err } - snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", containerPool, "containers-snapshots", project.Prefix(args.Container.Project(), containerName)) - snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Container.Project(), containerName)) + snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", instancePool, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName)) + snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName)) if !shared.PathExists(snapshotMntPointSymlink) { err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink) if err != nil { @@ -2575,10 +2575,10 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M } } // At this point we have already figured out the parent - // container's root disk device so we can simply + // instance's root disk device so we can simply // retrieve it from the expanded devices.
parentStoragePool := "" - parentExpandedDevices := args.Container.ExpandedDevices() + parentExpandedDevices := args.Instance.ExpandedDevices() parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative()) if parentLocalRootDiskDeviceKey != "" { parentStoragePool = parentLocalRootDiskDevice["pool"] @@ -2591,7 +2591,7 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M if !args.InstanceOnly { for _, snap := range args.Snapshots { - ctArgs := snapshotProtobufToContainerArgs(args.Container.Project(), containerName, snap) + ctArgs := snapshotProtobufToContainerArgs(args.Instance.Project(), instanceName, snap) // Ensure that snapshot and parent container have the // same storage pool in their local root disk device. @@ -2605,20 +2605,20 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M } } - snapshotMntPoint := driver.GetSnapshotMountPoint(args.Container.Project(), containerPool, ctArgs.Name) - _, err := containerCreateEmptySnapshot(args.Container.DaemonState(), ctArgs) + snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), instancePool, ctArgs.Name) + _, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs) if err != nil { return err } - snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Container.Project(), containerName)) - snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Container.Project(), containerName)) + snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName)) + snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName)) err = driver.CreateSnapshotMountpoint(snapshotMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink) if err != nil { return err } - tmpSnapshotMntPoint, err := ioutil.TempDir(containersPath, project.Prefix(args.Container.Project(), containerName)) + tmpSnapshotMntPoint, err := ioutil.TempDir(containersPath, project.Prefix(args.Instance.Project(), instanceName)) if err != nil { return err } @@ -2637,9 +2637,9 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M } } - /* finally, do the real container */ + /* finally, do the real instance */ containersMntPoint := driver.GetContainerMountPoint("default", s.pool.Name, "") - tmpContainerMntPoint, err := ioutil.TempDir(containersMntPoint, project.Prefix(args.Container.Project(), containerName)) + tmpContainerMntPoint, err := ioutil.TempDir(containersMntPoint, project.Prefix(args.Instance.Project(), instanceName)) if err != nil { return err } @@ -2650,8 +2650,8 @@ func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args M return err } - wrapper := StorageProgressWriter(op, "fs_progress", containerName) - containerMntPoint := driver.GetContainerMountPoint(args.Container.Project(), s.pool.Name, containerName) + wrapper := StorageProgressWriter(op, "fs_progress", instanceName) + containerMntPoint := driver.GetContainerMountPoint(args.Instance.Project(), s.pool.Name, instanceName) err = btrfsRecv("", tmpContainerMntPoint, containerMntPoint, false, wrapper) if err != nil { return err diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go index c76a3741fe..0abd661940 100644 --- a/lxd/storage_ceph.go +++ b/lxd/storage_ceph.go @@ -2817,23 +2817,23 @@ func (s 
*storageCeph) PreservesInodes() bool { func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) { // If the container is a snapshot, let's just send that. We don't need // to send anything else, because that's all the user asked for. - if args.Container.IsSnapshot() { + if args.Instance.IsSnapshot() { return &rbdMigrationSourceDriver{ - container: args.Container, + container: args.Instance, ceph: s, }, nil } driver := rbdMigrationSourceDriver{ - container: args.Container, - snapshots: []container{}, + container: args.Instance, + snapshots: []Instance{}, rbdSnapshotNames: []string{}, ceph: s, } - containerName := args.Container.Name() + instanceName := args.Instance.Name() if args.InstanceOnly { - logger.Debugf(`Only migrating the RBD storage volume for container "%s" on storage pool "%s`, containerName, s.pool.Name) + logger.Debugf(`Only migrating the RBD storage volume for container "%s" on storage pool "%s`, instanceName, s.pool.Name) return &driver, nil } @@ -2841,15 +2841,15 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag // that we send the oldest to newest snapshot, hopefully saving on xfer // costs. Then, after all that, we send the container itself. snapshots, err := cephRBDVolumeListSnapshots(s.ClusterName, - s.OSDPoolName, project.Prefix(args.Container.Project(), containerName), + s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName) if err != nil { if err != db.ErrNoSuchObject { - logger.Errorf(`Failed to list snapshots for RBD storage volume "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err) + logger.Errorf(`Failed to list snapshots for RBD storage volume "%s" on storage pool "%s": %s`, instanceName, s.pool.Name, err) return nil, err } } - logger.Debugf(`Retrieved snapshots "%v" for RBD storage volume "%s" on storage pool "%s"`, snapshots, containerName, s.pool.Name) + logger.Debugf(`Retrieved snapshots "%v" for RBD storage volume "%s" on storage pool "%s"`, snapshots, instanceName, s.pool.Name) for _, snap := range snapshots { // In the case of e.g. multiple copies running at the same time, @@ -2860,10 +2860,10 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag continue } - lxdName := fmt.Sprintf("%s%s%s", containerName, shared.SnapshotDelimiter, snap[len("snapshot_"):]) - snapshot, err := containerLoadByProjectAndName(s.s, args.Container.Project(), lxdName) + lxdName := fmt.Sprintf("%s%s%s", instanceName, shared.SnapshotDelimiter, snap[len("snapshot_"):]) + snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName) if err != nil { - logger.Errorf(`Failed to load snapshot "%s" for RBD storage volume "%s" on storage pool "%s": %s`, lxdName, containerName, s.pool.Name, err) + logger.Errorf(`Failed to load snapshot "%s" for RBD storage volume "%s" on storage pool "%s": %s`, lxdName, instanceName, s.pool.Name, err) return nil, err } @@ -2878,7 +2878,7 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi // Check that we received a valid root disk device with a pool property // set. 
parentStoragePool := "" - parentExpandedDevices := args.Container.ExpandedDevices() + parentExpandedDevices := args.Instance.ExpandedDevices() parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative()) if parentLocalRootDiskDeviceKey != "" { parentStoragePool = parentLocalRootDiskDevice["pool"] @@ -2898,19 +2898,19 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi // the receiving LXD instance it also means that s.ClusterName has been // set to the correct cluster name for that LXD instance. Yeah, I think // that's actually correct. - containerName := args.Container.Name() - if !cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, project.Prefix(args.Container.Project(), containerName), storagePoolVolumeTypeNameContainer, s.UserName) { - err := cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, project.Prefix(args.Container.Project(), containerName), storagePoolVolumeTypeNameContainer, "0", s.UserName) + instanceName := args.Instance.Name() + if !cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName) { + err := cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, "0", s.UserName) if err != nil { - logger.Errorf(`Failed to create RBD storage volume "%s" for cluster "%s" in OSD pool "%s" on storage pool "%s": %s`, containerName, s.ClusterName, s.OSDPoolName, s.pool.Name, err) + logger.Errorf(`Failed to create RBD storage volume "%s" for cluster "%s" in OSD pool "%s" on storage pool "%s": %s`, instanceName, s.ClusterName, s.OSDPoolName, s.pool.Name, err) return err } - logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`, containerName, s.pool.Name) + logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`, instanceName, s.pool.Name) } if len(args.Snapshots) > 0 { - snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Container.Project(), containerName)) - snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Container.Project(), containerName)) + snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName)) + snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName)) if !shared.PathExists(snapshotMntPointSymlink) { err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink) if err != nil { @@ -2920,10 +2920,10 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi } // Now we're ready to receive the actual fs. - recvName := fmt.Sprintf("%s/container_%s", s.OSDPoolName, project.Prefix(args.Container.Project(), containerName)) + recvName := fmt.Sprintf("%s/container_%s", s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName)) for _, snap := range args.Snapshots { curSnapName := snap.GetName() - ctArgs := snapshotProtobufToContainerArgs(args.Container.Project(), containerName, snap) + ctArgs := snapshotProtobufToContainerArgs(args.Instance.Project(), instanceName, snap) // Ensure that snapshot and parent container have the same // storage pool in their local root disk device. 
If the root @@ -2935,12 +2935,12 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi ctArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool } } - _, err := containerCreateEmptySnapshot(args.Container.DaemonState(), ctArgs) + _, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs) if err != nil { - logger.Errorf(`Failed to create empty RBD storage volume for container "%s" on storage pool "%s: %s`, containerName, s.OSDPoolName, err) + logger.Errorf(`Failed to create empty RBD storage volume for container "%s" on storage pool "%s: %s`, instanceName, s.OSDPoolName, err) return err } - logger.Debugf(`Created empty RBD storage volume for container "%s" on storage pool "%s`, containerName, s.OSDPoolName) + logger.Debugf(`Created empty RBD storage volume for container "%s" on storage pool "%s`, instanceName, s.OSDPoolName) wrapper := StorageProgressWriter(op, "fs_progress", curSnapName) err = s.rbdRecv(conn, recvName, wrapper) @@ -2950,7 +2950,7 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi } logger.Debugf(`Received RBD storage volume "%s"`, curSnapName) - snapshotMntPoint := driver.GetSnapshotMountPoint(args.Container.Project(), s.pool.Name, fmt.Sprintf("%s/%s", containerName, *snap.Name)) + snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), s.pool.Name, fmt.Sprintf("%s/%s", instanceName, *snap.Name)) if !shared.PathExists(snapshotMntPoint) { err := os.MkdirAll(snapshotMntPoint, 0700) if err != nil { @@ -2960,7 +2960,7 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi } defer func() { - snaps, err := cephRBDVolumeListSnapshots(s.ClusterName, s.OSDPoolName, project.Prefix(args.Container.Project(), containerName), storagePoolVolumeTypeNameContainer, s.UserName) + snaps, err := cephRBDVolumeListSnapshots(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName) if err == nil { for _, snap := range snaps { snapOnlyName, _, _ := shared.ContainerGetParentAndSnapshotName(snap) @@ -2968,16 +2968,16 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi continue } - err := cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, project.Prefix(args.Container.Project(), containerName), storagePoolVolumeTypeNameContainer, snapOnlyName, s.UserName) + err := cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, snapOnlyName, s.UserName) if err != nil { - logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, snapOnlyName, containerName) + logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, snapOnlyName, instanceName) } } } }() // receive the container itself - wrapper := StorageProgressWriter(op, "fs_progress", containerName) + wrapper := StorageProgressWriter(op, "fs_progress", instanceName) err := s.rbdRecv(conn, recvName, wrapper) if err != nil { logger.Errorf(`Failed to receive RBD storage volume "%s": %s`, recvName, err) @@ -2995,21 +2995,21 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args Mi } // Re-generate the UUID - err = s.cephRBDGenerateUUID(project.Prefix(args.Container.Project(), args.Container.Name()), storagePoolVolumeTypeNameContainer) + err = s.cephRBDGenerateUUID(project.Prefix(args.Instance.Project(), args.Instance.Name()), 
storagePoolVolumeTypeNameContainer) if err != nil { return err } - containerMntPoint := driver.GetContainerMountPoint(args.Container.Project(), s.pool.Name, containerName) + containerMntPoint := driver.GetContainerMountPoint(args.Instance.Project(), s.pool.Name, instanceName) err = driver.CreateContainerMountpoint( containerMntPoint, - args.Container.Path(), - args.Container.IsPrivileged()) + args.Instance.Path(), + args.Instance.IsPrivileged()) if err != nil { - logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s": %s"`, containerMntPoint, containerName, s.pool.Name, err) + logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s": %s"`, containerMntPoint, instanceName, s.pool.Name, err) return err } - logger.Debugf(`Created mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s""`, containerMntPoint, containerName, s.pool.Name) + logger.Debugf(`Created mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s""`, containerMntPoint, instanceName, s.pool.Name) return nil } diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go index ee04ffad0b..92db99569b 100644 --- a/lxd/storage_dir.go +++ b/lxd/storage_dir.go @@ -681,7 +681,7 @@ func (s *storageDir) copyContainer(target Instance, source Instance) error { return nil } -func (s *storageDir) copySnapshot(target container, targetPool string, source container, sourcePool string) error { +func (s *storageDir) copySnapshot(target Instance, targetPool string, source Instance, sourcePool string) error { sourceName := source.Name() targetName := target.Name() sourceContainerMntPoint := driver.GetSnapshotMountPoint(source.Project(), sourcePool, sourceName) @@ -783,14 +783,14 @@ func (s *storageDir) doContainerCopy(target Instance, source Instance, container } for _, snap := range snapshots { - sourceSnapshot, err := containerLoadByProjectAndName(srcState, source.Project(), snap.Name()) + sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name()) if err != nil { return err } _, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name()) newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName) - targetSnapshot, err := containerLoadByProjectAndName(s.s, source.Project(), newSnapName) + targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName) if err != nil { return err } diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go index b764dc9f59..14963e48c9 100644 --- a/lxd/storage_lvm.go +++ b/lxd/storage_lvm.go @@ -1198,12 +1198,12 @@ func (s *storageLvm) doContainerCopy(target Instance, source Instance, container logger.Debugf("Copying LVM container storage for snapshot %s to %s", snap.Name(), newSnapName) - sourceSnapshot, err := containerLoadByProjectAndName(srcState, source.Project(), snap.Name()) + sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name()) if err != nil { return err } - targetSnapshot, err := containerLoadByProjectAndName(s.s, source.Project(), newSnapName) + targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName) if err != nil { return err } diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go index 14d627c447..6875df8335 100644 --- a/lxd/storage_lvm_utils.go +++ b/lxd/storage_lvm_utils.go @@ -342,7 +342,7 @@ func (s *storageLvm) copyContainerThinpool(target Instance, source Instance, rea return nil } -func (s *storageLvm) 
copySnapshot(target container, source container, refresh bool) error { +func (s *storageLvm) copySnapshot(target Instance, source Instance, refresh bool) error { sourcePool, err := source.StoragePool() if err != nil { return err diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go index 2453fd5f9f..8d329cd5fa 100644 --- a/lxd/storage_migration.go +++ b/lxd/storage_migration.go @@ -147,7 +147,7 @@ func rsyncStorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSour func rsyncRefreshSource(refreshSnapshots []string, args MigrationSourceArgs) (MigrationStorageSourceDriver, error) { var snapshots = []Instance{} if !args.InstanceOnly { - allSnapshots, err := args.Container.Snapshots() + allSnapshots, err := args.Instance.Snapshots() if err != nil { return nil, err } @@ -162,20 +162,20 @@ func rsyncRefreshSource(refreshSnapshots []string, args MigrationSourceArgs) (Mi } } - return rsyncStorageSourceDriver{args.Container, snapshots, args.RsyncFeatures}, nil + return rsyncStorageSourceDriver{args.Instance, snapshots, args.RsyncFeatures}, nil } func rsyncMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) { var err error var snapshots = []Instance{} if !args.InstanceOnly { - snapshots, err = args.Container.Snapshots() + snapshots, err = args.Instance.Snapshots() if err != nil { return nil, err } } - return rsyncStorageSourceDriver{args.Container, snapshots, args.RsyncFeatures}, nil + return rsyncStorageSourceDriver{args.Instance, snapshots, args.RsyncFeatures}, nil } func snapshotProtobufToContainerArgs(project string, containerName string, snap *migration.Snapshot) db.ContainerArgs { @@ -282,18 +282,18 @@ func rsyncStorageMigrationSink(conn *websocket.Conn, op *operation, args Migrati } func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error { - ourStart, err := args.Container.StorageStart() + ourStart, err := args.Instance.StorageStart() if err != nil { return err } if ourStart { - defer args.Container.StorageStop() + defer args.Instance.StorageStop() } // At this point we have already figured out the parent container's root // disk device so we can simply retrieve it from the expanded devices. parentStoragePool := "" - parentExpandedDevices := args.Container.ExpandedDevices() + parentExpandedDevices := args.Instance.ExpandedDevices() parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative()) if parentLocalRootDiskDeviceKey != "" { parentStoragePool = parentLocalRootDiskDevice["pool"] @@ -304,12 +304,12 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA return fmt.Errorf("the container's root device is missing the pool property") } - localSnapshots, err := args.Container.Snapshots() + localSnapshots, err := args.Instance.Snapshots() if err != nil { return err } - isDirBackend := args.Container.Storage().GetStorageType() == storageTypeDir + isDirBackend := args.Instance.Storage().GetStorageType() == storageTypeDir if isDirBackend { if !args.InstanceOnly { for _, snap := range args.Snapshots { @@ -329,7 +329,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA continue } - snapArgs := snapshotProtobufToContainerArgs(args.Container.Project(), args.Container.Name(), snap) + snapArgs := snapshotProtobufToContainerArgs(args.Instance.Project(), args.Instance.Name(), snap) // Ensure that snapshot and parent container have the // same storage pool in their local root disk device. 
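As context for the signature changes above: the hunks repeatedly widen storage-driver helpers from the concrete container type to the Instance interface. A minimal sketch of that pattern follows; the types here are trimmed stand-ins for illustration only, since LXD's real Instance interface carries many more methods. Note that the argument order is part of the contract (target first, then source), which is why the updated call sites are worth reading carefully.

package main

import "fmt"

// Instance is a trimmed stand-in for the interface the patch moves to.
type Instance interface {
	Name() string
	Project() string
}

// container is a hypothetical concrete type satisfying Instance.
type container struct{ name, project string }

func (c container) Name() string    { return c.name }
func (c container) Project() string { return c.project }

// copySnapshot accepts any Instance rather than the concrete container
// type, so future instance kinds can share the same code path.
// Argument order is unchanged by the widening: target, then source.
func copySnapshot(target Instance, source Instance) error {
	fmt.Printf("copying %s/%s -> %s/%s\n",
		source.Project(), source.Name(),
		target.Project(), target.Name())
	return nil
}

func main() {
	src := container{name: "c1/snap0", project: "default"}
	dst := container{name: "c2/snap0", project: "default"}
	_ = copySnapshot(dst, src)
}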
@@ -343,12 +343,12 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA } } - // Try and a load container - s, err := containerLoadByProjectAndName(args.Container.DaemonState(), - args.Container.Project(), snapArgs.Name) + // Try and load the instance + s, err := instanceLoadByProjectAndName(args.Instance.DaemonState(), + args.Instance.Project(), snapArgs.Name) if err != nil { // Create the snapshot since it doesn't seem to exist - s, err = containerCreateEmptySnapshot(args.Container.DaemonState(), snapArgs) + s, err = containerCreateEmptySnapshot(args.Instance.DaemonState(), snapArgs) if err != nil { return err } @@ -359,15 +359,18 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA return err } - err = resetContainerDiskIdmap(args.Container, args.Idmap) - if err != nil { - return err + if args.Instance.Type() == instance.TypeContainer { + c := args.Instance.(container) + err = resetContainerDiskIdmap(c, args.Idmap) + if err != nil { + return err + } } } } - wrapper := StorageProgressWriter(op, "fs_progress", args.Container.Name()) - err = RsyncRecv(shared.AddSlash(args.Container.Path()), conn, wrapper, args.RsyncFeatures) + wrapper := StorageProgressWriter(op, "fs_progress", args.Instance.Name()) + err = RsyncRecv(shared.AddSlash(args.Instance.Path()), conn, wrapper, args.RsyncFeatures) if err != nil { return err } @@ -390,7 +393,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA continue } - snapArgs := snapshotProtobufToContainerArgs(args.Container.Project(), args.Container.Name(), snap) + snapArgs := snapshotProtobufToContainerArgs(args.Instance.Project(), args.Instance.Name(), snap) // Ensure that snapshot and parent container have the // same storage pool in their local root disk device.
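Where container-only behaviour is still needed (resetting the disk idmap above, or CRIU migration in the sink's Do method, which returns an error instead of skipping), the patch checks Type() before asserting down to the concrete type. A self-contained sketch of that guard, using simplified stand-in types rather than LXD's real ones; the second helper shows the equivalent comma-ok form, which cannot panic even if the type tag and concrete type ever disagree.

package main

import "fmt"

type instanceType int

const typeContainer instanceType = iota

// Instance is a trimmed stand-in for the real interface.
type Instance interface {
	Type() instanceType
}

// container adds the container-only method the sink needs.
type container struct{}

func (c container) Type() instanceType    { return typeContainer }
func (c container) ResetDiskIdmap() error { return nil }

// resetIdmap mirrors the patch: check the type tag, then assert.
func resetIdmap(inst Instance) error {
	if inst.Type() != typeContainer {
		return nil // non-containers skip the idmap reset
	}
	c := inst.(container) // safe after the check above
	return c.ResetDiskIdmap()
}

// resetIdmapOk does the same with a comma-ok assertion.
func resetIdmapOk(inst Instance) error {
	if c, ok := inst.(container); ok {
		return c.ResetDiskIdmap()
	}
	return nil
}

func main() {
	fmt.Println(resetIdmap(container{}), resetIdmapOk(container{}))
}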
@@ -405,20 +408,23 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA } wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName()) - err := RsyncRecv(shared.AddSlash(args.Container.Path()), conn, wrapper, args.RsyncFeatures) + err := RsyncRecv(shared.AddSlash(args.Instance.Path()), conn, wrapper, args.RsyncFeatures) if err != nil { return err } - err = resetContainerDiskIdmap(args.Container, args.Idmap) - if err != nil { - return err + if args.Instance.Type() == instance.TypeContainer { + c := args.Instance.(container) + err = resetContainerDiskIdmap(c, args.Idmap) + if err != nil { + return err + } } - _, err = containerLoadByProjectAndName(args.Container.DaemonState(), - args.Container.Project(), snapArgs.Name) + _, err = instanceLoadByProjectAndName(args.Instance.DaemonState(), + args.Instance.Project(), snapArgs.Name) if err != nil { - _, err = containerCreateAsSnapshot(args.Container.DaemonState(), snapArgs, args.Container) + _, err = containerCreateAsSnapshot(args.Instance.DaemonState(), snapArgs, args.Instance) if err != nil { return err } @@ -426,8 +432,8 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA } } - wrapper := StorageProgressWriter(op, "fs_progress", args.Container.Name()) - err = RsyncRecv(shared.AddSlash(args.Container.Path()), conn, wrapper, args.RsyncFeatures) + wrapper := StorageProgressWriter(op, "fs_progress", args.Instance.Name()) + err = RsyncRecv(shared.AddSlash(args.Instance.Path()), conn, wrapper, args.RsyncFeatures) if err != nil { return err } @@ -435,16 +441,19 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA if args.Live { /* now receive the final sync */ - wrapper := StorageProgressWriter(op, "fs_progress", args.Container.Name()) - err := RsyncRecv(shared.AddSlash(args.Container.Path()), conn, wrapper, args.RsyncFeatures) + wrapper := StorageProgressWriter(op, "fs_progress", args.Instance.Name()) + err := RsyncRecv(shared.AddSlash(args.Instance.Path()), conn, wrapper, args.RsyncFeatures) if err != nil { return err } } - err = resetContainerDiskIdmap(args.Container, args.Idmap) - if err != nil { - return err + if args.Instance.Type() == instance.TypeContainer { + c := args.Instance.(container) + err = resetContainerDiskIdmap(c, args.Idmap) + if err != nil { + return err + } } return nil diff --git a/lxd/storage_migration_ceph.go b/lxd/storage_migration_ceph.go index 83cc7cfc0c..90fa2b80a5 100644 --- a/lxd/storage_migration_ceph.go +++ b/lxd/storage_migration_ceph.go @@ -15,15 +15,15 @@ import ( ) type rbdMigrationSourceDriver struct { - container container - snapshots []container + container Instance + snapshots []Instance rbdSnapshotNames []string ceph *storageCeph runningSnapName string stoppedSnapName string } -func (s *rbdMigrationSourceDriver) Snapshots() []container { +func (s *rbdMigrationSourceDriver) Snapshots() []Instance { return s.snapshots } diff --git a/lxd/storage_migration_zfs.go b/lxd/storage_migration_zfs.go index fcfad12e5c..fe94bf6eab 100644 --- a/lxd/storage_migration_zfs.go +++ b/lxd/storage_migration_zfs.go @@ -15,8 +15,8 @@ import ( ) type zfsMigrationSourceDriver struct { - container container - snapshots []container + instance Instance + snapshots []Instance zfsSnapshotNames []string zfs *storageZfs runningSnapName string @@ -25,7 +25,7 @@ type zfsMigrationSourceDriver struct { } func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) 
error { - sourceParentName, _, _ := shared.ContainerGetParentAndSnapshotName(s.container.Name()) + sourceParentName, _, _ := shared.ContainerGetParentAndSnapshotName(s.instance.Name()) poolName := s.zfs.getOnDiskPoolName() args := []string{"send"} @@ -37,9 +37,9 @@ func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zf } } - args = append(args, []string{fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.container.Project(), sourceParentName), zfsName)}...) + args = append(args, []string{fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.instance.Project(), sourceParentName), zfsName)}...) if zfsParent != "" { - args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.container.Project(), s.container.Name()), zfsParent)) + args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.instance.Project(), s.instance.Name()), zfsParent)) } cmd := exec.Command("zfs", args...) @@ -79,10 +79,10 @@ func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zf } func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error { - if s.container.IsSnapshot() { - _, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(s.container.Name()) + if s.instance.IsSnapshot() { + _, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(s.instance.Name()) snapshotName := fmt.Sprintf("snapshot-%s", snapOnlyName) - wrapper := StorageProgressReader(op, "fs_progress", s.container.Name()) + wrapper := StorageProgressReader(op, "fs_progress", s.instance.Name()) return s.send(conn, snapshotName, "", wrapper) } @@ -104,11 +104,11 @@ func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *op } s.runningSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String()) - if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.container.Project(), s.container.Name())), s.runningSnapName); err != nil { + if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.runningSnapName); err != nil { return err } - wrapper := StorageProgressReader(op, "fs_progress", s.container.Name()) + wrapper := StorageProgressReader(op, "fs_progress", s.instance.Name()) if err := s.send(conn, s.runningSnapName, lastSnap, wrapper); err != nil { return err } @@ -118,7 +118,7 @@ func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *op func (s *zfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error { s.stoppedSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String()) - if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.container.Project(), s.container.Name())), s.stoppedSnapName); err != nil { + if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.stoppedSnapName); err != nil { return err } @@ -132,10 +132,10 @@ func (s *zfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwl func (s *zfsMigrationSourceDriver) Cleanup() { poolName := s.zfs.getOnDiskPoolName() if s.stoppedSnapName != "" { - zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.container.Project(), s.container.Name())), 
s.stoppedSnapName) + zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.stoppedSnapName) } if s.runningSnapName != "" { - zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.container.Project(), s.container.Name())), s.runningSnapName) + zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.runningSnapName) } } diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go index 6f85037019..aff449d39a 100644 --- a/lxd/storage_zfs.go +++ b/lxd/storage_zfs.go @@ -1303,7 +1303,7 @@ func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOn prev = snapshots[i-1].Name() } - sourceSnapshot, err := containerLoadByProjectAndName(s.s, source.Project(), snap.Name()) + sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name()) if err != nil { return err } @@ -1311,7 +1311,7 @@ func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOn _, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name()) prevSnapOnlyName = snapOnlyName newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName) - targetSnapshot, err := containerLoadByProjectAndName(s.s, target.Project(), newSnapName) + targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName) if err != nil { return err } @@ -1886,7 +1886,7 @@ func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup backup, source return nil } -func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup, source container, parentSnapshot string) error { +func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup, source Instance, parentSnapshot string) error { sourceName := source.Name() snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath) @@ -1935,7 +1935,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup bac prev = snapshots[i-1].Name() } - sourceSnapshot, err := containerLoadByProjectAndName(s.s, source.Project(), snap.Name()) + sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name()) if err != nil { return err } @@ -2518,13 +2518,13 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage /* If the container is a snapshot, let's just send that; we don't need * to send anything else, because that's all the user asked for. */ - if args.Container.IsSnapshot() { - return &zfsMigrationSourceDriver{container: args.Container, zfs: s, zfsFeatures: args.ZfsFeatures}, nil + if args.Instance.IsSnapshot() { + return &zfsMigrationSourceDriver{instance: args.Instance, zfs: s, zfsFeatures: args.ZfsFeatures}, nil } driver := zfsMigrationSourceDriver{ - container: args.Container, - snapshots: []container{}, + instance: args.Instance, + snapshots: []Instance{}, zfsSnapshotNames: []string{}, zfs: s, zfsFeatures: args.ZfsFeatures, @@ -2538,7 +2538,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage * is that we send the oldest to newest snapshot, hopefully saving on * xfer costs. Then, after all that, we send the container itself. 
*/ - snapshots, err := zfsPoolListSnapshots(s.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(args.Container.Project(), args.Container.Name()))) + snapshots, err := zfsPoolListSnapshots(s.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name()))) if err != nil { return nil, err } @@ -2553,8 +2553,8 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage continue } - lxdName := fmt.Sprintf("%s%s%s", args.Container.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):]) - snapshot, err := containerLoadByProjectAndName(s.s, args.Container.Project(), lxdName) + lxdName := fmt.Sprintf("%s%s%s", args.Instance.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):]) + snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName) if err != nil { return nil, err } @@ -2568,7 +2568,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error { poolName := s.getOnDiskPoolName() - zfsName := fmt.Sprintf("containers/%s", project.Prefix(args.Container.Project(), args.Container.Name())) + zfsName := fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name())) zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error { zfsFsName := fmt.Sprintf("%s/%s", poolName, zfsName) args := []string{"receive", "-F", "-o", "canmount=noauto", "-o", "mountpoint=none", "-u", zfsFsName} @@ -2614,8 +2614,8 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig } if len(args.Snapshots) > 0 { - snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Container.Project(), s.volume.Name)) - snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Container.Project(), args.Container.Name())) + snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), s.volume.Name)) + snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), args.Instance.Name())) if !shared.PathExists(snapshotMntPointSymlink) { err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink) if err != nil { @@ -2628,7 +2628,7 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig // container's root disk device so we can simply // retrieve it from the expanded devices. parentStoragePool := "" - parentExpandedDevices := args.Container.ExpandedDevices() + parentExpandedDevices := args.Instance.ExpandedDevices() parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative()) if parentLocalRootDiskDeviceKey != "" { parentStoragePool = parentLocalRootDiskDevice["pool"] @@ -2640,7 +2640,7 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig } for _, snap := range args.Snapshots { - ctArgs := snapshotProtobufToContainerArgs(args.Container.Project(), args.Container.Name(), snap) + ctArgs := snapshotProtobufToContainerArgs(args.Instance.Project(), args.Instance.Name(), snap) // Ensure that snapshot and parent container have the // same storage pool in their local root disk device. 
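Several of these hunks rebuild LXD-side snapshot names from on-disk storage snapshot names, e.g. joining args.Instance.Name() and the suffix left after stripping "snapshot-" with shared.SnapshotDelimiter. A sketch of that round trip; the helpers below are illustrative approximations of shared.ContainerGetParentAndSnapshotName and the delimiter constant, assuming the delimiter is "/" as the fmt.Sprintf("%s/%s", ...) calls in the diff suggest.

package main

import (
	"fmt"
	"strings"
)

// snapshotDelimiter stands in for shared.SnapshotDelimiter.
const snapshotDelimiter = "/"

// joinSnapshotName rebuilds the LXD-side name from an instance name
// and a snapshot-only name such as "snap0".
func joinSnapshotName(instanceName, snapOnlyName string) string {
	return fmt.Sprintf("%s%s%s", instanceName, snapshotDelimiter, snapOnlyName)
}

// parentAndSnapshotName approximates what
// shared.ContainerGetParentAndSnapshotName returns.
func parentAndSnapshotName(name string) (parent, snap string, isSnap bool) {
	parts := strings.SplitN(name, snapshotDelimiter, 2)
	if len(parts) == 1 {
		return name, "", false
	}
	return parts[0], parts[1], true
}

func main() {
	onDisk := "snapshot-snap0" // e.g. a ZFS snapshot name on disk
	full := joinSnapshotName("c1", onDisk[len("snapshot-"):])
	parent, snap, ok := parentAndSnapshotName(full)
	fmt.Println(full, parent, snap, ok) // c1/snap0 c1 snap0 true
}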
@@ -2653,18 +2653,18 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig ctArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool } } - _, err := containerCreateEmptySnapshot(args.Container.DaemonState(), ctArgs) + _, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs) if err != nil { return err } wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName()) - name := fmt.Sprintf("containers/%s@snapshot-%s", project.Prefix(args.Container.Project(), args.Container.Name()), snap.GetName()) + name := fmt.Sprintf("containers/%s@snapshot-%s", project.Prefix(args.Instance.Project(), args.Instance.Name()), snap.GetName()) if err := zfsRecv(name, wrapper); err != nil { return err } - snapshotMntPoint := driver.GetSnapshotMountPoint(args.Container.Project(), poolName, fmt.Sprintf("%s/%s", args.Container.Name(), *snap.Name)) + snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), poolName, fmt.Sprintf("%s/%s", args.Instance.Name(), *snap.Name)) if !shared.PathExists(snapshotMntPoint) { err := os.MkdirAll(snapshotMntPoint, 0700) if err != nil { @@ -2675,7 +2675,7 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig defer func() { /* clean up our migration-send snapshots that we got from recv. */ - zfsSnapshots, err := zfsPoolListSnapshots(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Container.Project(), args.Container.Name()))) + zfsSnapshots, err := zfsPoolListSnapshots(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name()))) if err != nil { logger.Errorf("Failed listing snapshots post migration: %s", err) return @@ -2687,19 +2687,19 @@ func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args Mig continue } - zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Container.Project(), args.Container.Name())), snap) + zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name())), snap) } }() /* finally, do the real container */ - wrapper := StorageProgressWriter(op, "fs_progress", args.Container.Name()) + wrapper := StorageProgressWriter(op, "fs_progress", args.Instance.Name()) if err := zfsRecv(zfsName, wrapper); err != nil { return err } if args.Live { /* and again for the post-running snapshot if this was a live migration */ - wrapper := StorageProgressWriter(op, "fs_progress", args.Container.Name()) + wrapper := StorageProgressWriter(op, "fs_progress", args.Instance.Name()) if err := zfsRecv(zfsName, wrapper); err != nil { return err }
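For context on the zfsRecv closure the sink uses above: it shells out to zfs receive with the flags visible in the diff. A standalone sketch of that helper follows; the dataset name and the plain io.Reader input are simplifications, since the real closure reads from the migration websocket and applies a progress write-wrapper.

package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"
)

// zfsRecv streams a ZFS send stream from r into `zfs receive` for the
// given dataset, using the same flags as the diff above.
func zfsRecv(dataset string, r io.Reader) error {
	cmd := exec.Command("zfs", "receive",
		"-F",                    // force rollback to the most recent snapshot
		"-o", "canmount=noauto", // do not mount automatically
		"-o", "mountpoint=none",
		"-u", // leave the received filesystem unmounted
		dataset)
	cmd.Stdin = r
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("zfs receive failed: %v (%s)", err, out)
	}
	return nil
}

func main() {
	// Example: zfs send pool/containers/c1@snap0 | ./this-program
	if err := zfsRecv("pool/containers/c1-copy", os.Stdin); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}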