The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6219

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
- Adds a `vmQemu` struct that implements the `Instance` interface (as non-functional stubs for now).
- Adds an `instanceInstantiate` function that wraps `containerLXCLoad` and `vmQemuLoad`, dispatching on the instance type (sketched below).
- Replaces uses of `containerLXCLoad` with `instanceInstantiate`.
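For reviewers who want the shape of the change up front: the series boils down to
picking a concrete struct from the stored instance type and handing it back behind
a shared interface. A minimal, self-contained sketch of that pattern (stand-in
types and names, not the actual LXD code):

    package main

    import "fmt"

    // Instance stands in for the common interface both drivers implement.
    type Instance interface {
            Name() string
    }

    type lxcContainer struct{ name string }

    func (c *lxcContainer) Name() string { return c.name }

    type qemuVM struct{ name string }

    func (v *qemuVM) Name() string { return v.name }

    type instanceType int

    const (
            typeContainer instanceType = iota
            typeVM
    )

    // instantiate mirrors instanceInstantiate below: dispatch on the stored
    // type, return the shared interface.
    func instantiate(t instanceType, name string) (Instance, error) {
            switch t {
            case typeContainer:
                    return &lxcContainer{name: name}, nil
            case typeVM:
                    return &qemuVM{name: name}, nil
            default:
                    return nil, fmt.Errorf("invalid instance type for instance %s", name)
            }
    }

    func main() {
            inst, err := instantiate(typeVM, "vm01")
            if err != nil {
                    fmt.Println(err)
                    return
            }

            fmt.Println(inst.Name())
    }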
From 32905cc9c26be901e976c8f6aab6e4714f28d0f6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 16 Sep 2019 17:55:10 +0100
Subject: [PATCH 1/6] lxd: Renames containerLoadNodeAll to instanceLoadNodeAll

- And changes the return type to []Instance (see the note on slice conversion below)
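A note on the return type change: Go has no slice covariance, so a []container
value cannot be passed where []Instance is expected; conversions like the
[]device.InstanceIdentifier one in lxd/container.go's init() below have to copy
in a loop rather than assign. A standalone illustration:

    package main

    import "fmt"

    type Instance interface {
            Name() string
    }

    type container struct{ name string }

    func (c container) Name() string { return c.name }

    func main() {
            cts := []container{{"c1"}, {"c2"}}

            // var insts []Instance = cts // does not compile: []container is not []Instance

            insts := make([]Instance, 0, len(cts))
            for _, c := range cts {
                    insts = append(insts, c)
            }

            fmt.Println(len(insts), insts[0].Name())
    }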

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container.go             | 108 ++++++++++++++++++-----------------
 lxd/container_lxc.go         |   4 +-
 lxd/containers.go            |  42 +++++++-------
 lxd/containers_get.go        |   6 +-
 lxd/daemon.go                |   6 +-
 lxd/devices.go               |  38 ++++++------
 lxd/devlxd.go                |  14 +++--
 lxd/networks.go              |  45 ++++++++-------
 lxd/networks_utils.go        |  25 ++++----
 lxd/storage_volumes_utils.go |  45 +++++++--------
 10 files changed, 173 insertions(+), 160 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index de03c8e5c6..45c7687dba 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -35,10 +35,10 @@ import (
 )
 
 func init() {
-       // Expose containerLoadNodeAll to the device package converting the response to a slice of InstanceIdentifiers.
+       // Expose instanceLoadNodeAll to the device package converting the response to a slice of InstanceIdentifiers.
        // This is because container types are defined in the main package and are not importable.
        device.InstanceLoadNodeAll = func(s *state.State) ([]device.InstanceIdentifier, error) {
-               containers, err := containerLoadNodeAll(s)
+               containers, err := instanceLoadNodeAll(s)
                if err != nil {
                        return nil, err
                }
@@ -994,7 +994,7 @@ func instanceLoadByProjectAndName(s *state.State, project, name string) (Instanc
        return c, nil
 }
 
-func containerLoadByProject(s *state.State, project string) ([]container, error) {
+func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
        // Get all the containers
        var cts []db.Instance
        err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1014,11 +1014,11 @@ func containerLoadByProject(s *state.State, project string) ([]container, error)
                return nil, err
        }
 
-       return containerLoadAllInternal(cts, s)
+       return instanceLoadAllInternal(cts, s)
 }
 
-// Load all containers across all projects.
-func containerLoadFromAllProjects(s *state.State) ([]container, error) {
+// Load all instances across all projects.
+func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
        var projects []string
 
        err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1030,25 +1030,25 @@ func containerLoadFromAllProjects(s *state.State) ([]container, error) {
                return nil, err
        }
 
-       containers := []container{}
+       instances := []Instance{}
        for _, project := range projects {
-               projectContainers, err := containerLoadByProject(s, project)
+               projectInstances, err := instanceLoadByProject(s, project)
                if err != nil {
-                       return nil, errors.Wrapf(nil, "Load containers in project %s", project)
+                       return nil, errors.Wrapf(err, "Load instances in project %s", project)
                }
-               containers = append(containers, projectContainers...)
+               instances = append(instances, projectInstances...)
        }
 
-       return containers, nil
+       return instances, nil
 }
 
 // Legacy interface.
-func containerLoadAll(s *state.State) ([]container, error) {
-       return containerLoadByProject(s, "default")
+func instanceLoadAll(s *state.State) ([]Instance, error) {
+       return instanceLoadByProject(s, "default")
 }
 
-// Load all containers of this nodes.
-func containerLoadNodeAll(s *state.State) ([]container, error) {
+// Load all instances of this node.
+func instanceLoadNodeAll(s *state.State) ([]Instance, error) {
        // Get all the container arguments
        var cts []db.Instance
        err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1064,11 +1064,11 @@ func containerLoadNodeAll(s *state.State) ([]container, error) {
                return nil, err
        }
 
-       return containerLoadAllInternal(cts, s)
+       return instanceLoadAllInternal(cts, s)
 }
 
-// Load all containers of this nodes under the given project.
-func containerLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]container, error) {
+// Load all instances of this node under the given project.
+func instanceLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]Instance, error) {
        // Get all the container arguments
        var cts []db.Instance
        err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -1084,19 +1084,19 @@ func containerLoadNodeProjectAll(s *state.State, project string, instanceType in
                return nil, err
        }
 
-       return containerLoadAllInternal(cts, s)
+       return instanceLoadAllInternal(cts, s)
 }
 
-func containerLoadAllInternal(cts []db.Instance, s *state.State) ([]container, error) {
+func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Instance, error) {
        // Figure out what profiles are in use
        profiles := map[string]map[string]api.Profile{}
-       for _, cArgs := range cts {
-               projectProfiles, ok := profiles[cArgs.Project]
+       for _, instArgs := range dbInstances {
+               projectProfiles, ok := profiles[instArgs.Project]
                if !ok {
                        projectProfiles = map[string]api.Profile{}
-                       profiles[cArgs.Project] = projectProfiles
+                       profiles[instArgs.Project] = projectProfiles
                }
-               for _, profile := range cArgs.Profiles {
+               for _, profile := range instArgs.Profiles {
                        _, ok := projectProfiles[profile]
                        if !ok {
                                projectProfiles[profile] = api.Profile{}
@@ -1116,26 +1116,30 @@ func containerLoadAllInternal(cts []db.Instance, s *state.State) ([]container, e
                }
        }
 
-       // Load the container structs
-       containers := []container{}
-       for _, container := range cts {
-               // Figure out the container's profiles
+       // Load the instance structs
+       instances := []Instance{}
+       for _, dbInstance := range dbInstances {
+               // Figure out the instance's profiles
                cProfiles := []api.Profile{}
-               for _, name := range container.Profiles {
-                       cProfiles = append(cProfiles, profiles[container.Project][name])
+               for _, name := range dbInstance.Profiles {
+                       cProfiles = append(cProfiles, profiles[dbInstance.Project][name])
                }
 
-               args := db.ContainerToArgs(&container)
-
-               ct, err := containerLXCLoad(s, args, cProfiles)
-               if err != nil {
-                       return nil, err
+               if dbInstance.Type == instance.TypeContainer {
+                       args := db.ContainerToArgs(&dbInstance)
+                       ct, err := containerLXCLoad(s, args, cProfiles)
+                       if err != nil {
+                               return nil, err
+                       }
+                       instances = append(instances, ct)
+               } else {
+                       // TODO add virtual machine load here.
+                       continue
                }
 
-               containers = append(containers, ct)
        }
 
-       return containers, nil
+       return instances, nil
 }
 
 func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []Instance, error) {
@@ -1190,15 +1194,15 @@ func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []
 
 func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
        f := func(ctx context.Context) {
-               // Load all local containers
-               allContainers, err := containerLoadNodeAll(d.State())
+               // Load all local instances
+               allContainers, err := instanceLoadNodeAll(d.State())
                if err != nil {
                        logger.Error("Failed to load containers for scheduled 
snapshots", log.Ctx{"err": err})
                        return
                }
 
                // Figure out which need snapshotting (if any)
-               containers := []container{}
+               instances := []Instance{}
                for _, c := range allContainers {
                        schedule := c.ExpandedConfig()["snapshots.schedule"]
 
@@ -1237,15 +1241,15 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
                                continue
                        }
 
-                       containers = append(containers, c)
+                       instances = append(instances, c)
                }
 
-               if len(containers) == 0 {
+               if len(instances) == 0 {
                        return
                }
 
                opRun := func(op *operation) error {
-                       return autoCreateContainerSnapshots(ctx, d, containers)
+                       return autoCreateContainerSnapshots(ctx, d, instances)
                }
 
                op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
@@ -1279,9 +1283,9 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
        return f, schedule
 }
 
-func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []container) error {
+func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []Instance) error {
        // Make the snapshots
-       for _, c := range containers {
+       for _, c := range instances {
                ch := make(chan error)
                go func() {
                        snapshotName, err := containerDetermineNextSnapshotName(d, c, "snap%d")
@@ -1333,16 +1337,16 @@ func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []c
 
 func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
        f := func(ctx context.Context) {
-               // Load all local containers
-               allContainers, err := containerLoadNodeAll(d.State())
+               // Load all local instances
+               allInstances, err := instanceLoadNodeAll(d.State())
                if err != nil {
-                       logger.Error("Failed to load containers for snapshot expiry", log.Ctx{"err": err})
+                       logger.Error("Failed to load instances for snapshot expiry", log.Ctx{"err": err})
                        return
                }
 
                // Figure out which need snapshotting (if any)
                expiredSnapshots := []Instance{}
-               for _, c := range allContainers {
+               for _, c := range allInstances {
                        snapshots, err := c.Snapshots()
                        if err != nil {
                                logger.Error("Failed to list snapshots", 
log.Ctx{"err": err, "container": c.Name(), "project": c.Project()})
@@ -1375,14 +1379,14 @@ func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
                        return
                }
 
-               logger.Info("Pruning expired container snapshots")
+               logger.Info("Pruning expired instance snapshots")
 
                _, err = op.Run()
                if err != nil {
-                       logger.Error("Failed to remove expired container snapshots", log.Ctx{"err": err})
+                       logger.Error("Failed to remove expired instance snapshots", log.Ctx{"err": err})
                }
 
-               logger.Info("Done pruning expired container snapshots")
+               logger.Info("Done pruning expired instance snapshots")
        }
 
        first := true
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index e48c4f1a86..93b7beaf06 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -867,7 +867,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
        idmapLock.Lock()
        defer idmapLock.Unlock()
 
-       cts, err := containerLoadAll(state)
+       cts, err := instanceLoadAll(state)
        if err != nil {
                return nil, 0, err
        }
@@ -3391,7 +3391,7 @@ func (c *containerLXC) Snapshots() ([]Instance, error) {
        }
 
        // Build the snapshot list
-       containers, err := containerLoadAllInternal(snaps, c.state)
+       containers, err := instanceLoadAllInternal(snaps, c.state)
        if err != nil {
                return nil, err
        }
diff --git a/lxd/containers.go b/lxd/containers.go
index 8272242d6b..a340642a00 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -141,7 +141,7 @@ var instanceBackupExportCmd = APIEndpoint{
        Get: APIEndpointAction{Handler: containerBackupExportGet, AccessHandler: AllowProjectPermission("containers", "view")},
 }
 
-type containerAutostartList []container
+type containerAutostartList []Instance
 
 func (slice containerAutostartList) Len() int {
        return len(slice)
@@ -165,22 +165,22 @@ func (slice containerAutostartList) Swap(i, j int) {
 }
 
 func containersRestart(s *state.State) error {
-       // Get all the containers
-       result, err := containerLoadNodeAll(s)
+       // Get all the instances
+       result, err := instanceLoadNodeAll(s)
        if err != nil {
                return err
        }
 
-       containers := []container{}
+       instances := []Instance{}
 
        for _, c := range result {
-               containers = append(containers, c)
+               instances = append(instances, c)
        }
 
-       sort.Sort(containerAutostartList(containers))
+       sort.Sort(containerAutostartList(instances))
 
-       // Restart the containers
-       for _, c := range containers {
+       // Restart the instances
+       for _, c := range instances {
                config := c.ExpandedConfig()
                lastState := config["volatile.last_state.power"]
 
@@ -207,7 +207,7 @@ func containersRestart(s *state.State) error {
        return nil
 }
 
-type containerStopList []container
+type containerStopList []Instance
 
 func (slice containerStopList) Len() int {
        return len(slice)
@@ -263,12 +263,12 @@ func containersShutdown(s *state.State) error {
 
        dbAvailable := true
 
-       // Get all the containers
-       containers, err := containerLoadNodeAll(s)
+       // Get all the instances
+       instances, err := instanceLoadNodeAll(s)
        if err != nil {
                // Mark database as offline
                dbAvailable = false
-               containers = []container{}
+               instances = []Instance{}
 
                // List all containers on disk
                cnames, err := containersOnDisk()
@@ -287,12 +287,12 @@ func containersShutdown(s *state.State) error {
                                        return err
                                }
 
-                               containers = append(containers, c)
+                               instances = append(instances, c)
                        }
                }
        }
 
-       sort.Sort(containerStopList(containers))
+       sort.Sort(containerStopList(instances))
 
        if dbAvailable {
                // Reset all container states
@@ -304,18 +304,18 @@ func containersShutdown(s *state.State) error {
 
        var lastPriority int
 
-       if len(containers) != 0 {
-               lastPriority, _ = strconv.Atoi(containers[0].ExpandedConfig()["boot.stop.priority"])
+       if len(instances) != 0 {
+               lastPriority, _ = strconv.Atoi(instances[0].ExpandedConfig()["boot.stop.priority"])
        }
 
-       for _, c := range containers {
+       for _, c := range instances {
                priority, _ := strconv.Atoi(c.ExpandedConfig()["boot.stop.priority"])
 
                // Enforce shutdown priority
                if priority != lastPriority {
                        lastPriority = priority
 
-                       // Wait for containers with higher priority to finish
+                       // Wait for instances with higher priority to finish
                        wg.Wait()
                }
 
@@ -324,7 +324,7 @@ func containersShutdown(s *state.State) error {
 
                // Stop the container
                if lastState != "BROKEN" && lastState != "STOPPED" {
-                       // Determinate how long to wait for the container to shutdown cleanly
+                       // Determine how long to wait for the instance to shut down cleanly
                        var timeoutSeconds int
                        value, ok := c.ExpandedConfig()["boot.host_shutdown_timeout"]
                        if ok {
@@ -333,9 +333,9 @@ func containersShutdown(s *state.State) error {
                                timeoutSeconds = 30
                        }
 
-                       // Stop the container
+                       // Stop the instance
                        wg.Add(1)
-                       go func(c container, lastState string) {
+                       go func(c Instance, lastState string) {
                                c.Shutdown(time.Second * time.Duration(timeoutSeconds))
                                c.Stop(false)
                                c.VolatileSet(map[string]string{"volatile.last_state.power": lastState})
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 8d42f15da3..2d4d147920 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -103,10 +103,10 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
                return []string{}, err
        }
 
-       // Get the local containers
-       nodeCts := map[string]container{}
+       // Get the local instances
+       nodeCts := map[string]Instance{}
        if recursion > 0 {
-               cts, err := containerLoadNodeProjectAll(d.State(), project, instanceType)
+               cts, err := instanceLoadNodeProjectAll(d.State(), project, instanceType)
                if err != nil {
                        return nil, err
                }
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 583af879ad..d9a0f1d3a7 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -992,14 +992,14 @@ func (d *Daemon) Ready() error {
 }
 
 func (d *Daemon) numRunningContainers() (int, error) {
-       results, err := containerLoadNodeAll(d.State())
+       results, err := instanceLoadNodeAll(d.State())
        if err != nil {
                return 0, err
        }
 
        count := 0
-       for _, container := range results {
-               if container.IsRunning() {
+       for _, instance := range results {
+               if instance.IsRunning() {
                        count = count + 1
                }
        }
diff --git a/lxd/devices.go b/lxd/devices.go
index da7c13d42c..aeecf4a69a 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -293,16 +293,16 @@ func deviceTaskBalance(s *state.State) {
                return
        }
 
-       // Iterate through the containers
-       containers, err := containerLoadNodeAll(s)
+       // Iterate through the instances
+       instances, err := instanceLoadNodeAll(s)
        if err != nil {
-               logger.Error("Problem loading containers list", log.Ctx{"err": err})
+               logger.Error("Problem loading instances list", log.Ctx{"err": err})
                return
        }
 
-       fixedContainers := map[int][]container{}
-       balancedContainers := map[container]int{}
-       for _, c := range containers {
+       fixedInstances := map[int][]Instance{}
+       balancedInstances := map[Instance]int{}
+       for _, c := range instances {
                conf := c.ExpandedConfig()
                cpulimit, ok := conf["limits.cpu"]
                if !ok || cpulimit == "" {
@@ -317,7 +317,7 @@ func deviceTaskBalance(s *state.State) {
                if err == nil {
                        // Load-balance
                        count = min(count, len(cpus))
-                       balancedContainers[c] = count
+                       balancedInstances[c] = count
                } else {
                        // Pinned
                        containerCpus, err := parseCpuset(cpulimit)
@@ -329,18 +329,18 @@ func deviceTaskBalance(s *state.State) {
                                        continue
                                }
 
-                               _, ok := fixedContainers[nr]
+                               _, ok := fixedInstances[nr]
                                if ok {
-                                       fixedContainers[nr] = append(fixedContainers[nr], c)
+                                       fixedInstances[nr] = append(fixedInstances[nr], c)
                                } else {
-                                       fixedContainers[nr] = []container{c}
+                                       fixedInstances[nr] = []Instance{c}
                                }
                        }
                }
        }
 
        // Balance things
-       pinning := map[container][]string{}
+       pinning := map[Instance][]string{}
        usage := map[int]deviceTaskCPU{}
 
        for _, id := range cpus {
@@ -353,7 +353,7 @@ func deviceTaskBalance(s *state.State) {
                usage[id] = cpu
        }
 
-       for cpu, ctns := range fixedContainers {
+       for cpu, ctns := range fixedInstances {
                c, ok := usage[cpu]
                if !ok {
                        logger.Errorf("Internal error: container using 
unavailable cpu")
@@ -376,7 +376,7 @@ func deviceTaskBalance(s *state.State) {
                sortedUsage = append(sortedUsage, value)
        }
 
-       for ctn, count := range balancedContainers {
+       for ctn, count := range balancedInstances {
                sort.Sort(sortedUsage)
                for _, cpu := range sortedUsage {
                        if count == 0 {
@@ -416,12 +416,12 @@ func deviceNetworkPriority(s *state.State, netif string) {
                return
        }
 
-       containers, err := containerLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s)
        if err != nil {
                return
        }
 
-       for _, c := range containers {
+       for _, c := range instances {
                // Extract the current priority
                networkPriority := c.ExpandedConfig()["limits.network.priority"]
                if networkPriority == "" {
@@ -494,16 +494,16 @@ func deviceEventListener(s *state.State) {
 
 // devicesRegister calls the Register() function on all supported devices so they receive events.
 func devicesRegister(s *state.State) {
-       containers, err := containerLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s)
        if err != nil {
                logger.Error("Problem loading containers list", log.Ctx{"err": 
err})
                return
        }
 
-       for _, containerIf := range containers {
-               c, ok := containerIf.(*containerLXC)
+       for _, instanceIf := range instances {
+               c, ok := instanceIf.(*containerLXC)
                if !ok {
-                       logger.Errorf("Got non-LXC container")
+                       logger.Errorf("Instance is not of container type")
                        continue
                }
 
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index 3c76c0b030..7ceeb96685 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -483,24 +483,28 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
                return nil, err
        }
 
-       containers, err := containerLoadNodeAll(d.State())
+       instances, err := instanceLoadNodeAll(d.State())
        if err != nil {
                return nil, err
        }
 
-       for _, c := range containers {
-               if !c.IsRunning() {
+       for _, inst := range instances {
+               if inst.Type() != instance.TypeContainer {
                        continue
                }
 
-               initpid := c.InitPID()
+               if !inst.IsRunning() {
+                       continue
+               }
+
+               initpid := inst.InitPID()
                pidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", initpid))
                if err != nil {
                        return nil, err
                }
 
                if origPidNs == pidNs {
-                       return c, nil
+                       return inst.(container), nil
                }
        }
 
diff --git a/lxd/networks.go b/lxd/networks.go
index 1e7607231d..11e6b2b699 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -22,6 +22,7 @@ import (
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/device"
        "github.com/lxc/lxd/lxd/dnsmasq"
+       "github.com/lxc/lxd/lxd/instance"
        "github.com/lxc/lxd/lxd/iptables"
        "github.com/lxc/lxd/lxd/node"
        "github.com/lxc/lxd/lxd/state"
@@ -429,16 +430,16 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 
        // Look for containers using the interface
        if n.Type != "loopback" {
-               cts, err := containerLoadFromAllProjects(d.State())
+               insts, err := instanceLoadFromAllProjects(d.State())
                if err != nil {
                        return api.Network{}, err
                }
 
-               for _, c := range cts {
-                       if networkIsInUse(c, n.Name) {
-                               uri := fmt.Sprintf("/%s/containers/%s", version.APIVersion, c.Name())
-                               if c.Project() != "default" {
-                                       uri += fmt.Sprintf("?project=%s", c.Project())
+               for _, inst := range insts {
+                       if networkIsInUse(inst, n.Name) {
+                               uri := fmt.Sprintf("/%s/containers/%s", version.APIVersion, inst.Name())
+                               if inst.Project() != "default" {
+                                       uri += fmt.Sprintf("?project=%s", inst.Project())
                                }
                                n.UsedBy = append(n.UsedBy, uri)
                        }
@@ -712,24 +713,26 @@ func networkLeasesGet(d *Daemon, r *http.Request) Response {
 
        // Get all static leases
        if !isClusterNotification(r) {
-               // Get all the containers
-               containers, err := containerLoadByProject(d.State(), project)
+               // Get all the instances
+               instances, err := instanceLoadByProject(d.State(), project)
                if err != nil {
                        return SmartError(err)
                }
 
-               for _, c := range containers {
+               for _, inst := range instances {
                        // Go through all its devices (including profiles
-                       for k, d := range c.ExpandedDevices() {
+                       for k, d := range inst.ExpandedDevices() {
                                // Skip uninteresting entries
                                if d["type"] != "nic" || d["nictype"] != 
"bridged" || d["parent"] != name {
                                        continue
                                }
 
                                // Fill in the hwaddr from volatile
-                               d, err = c.(*containerLXC).fillNetworkDevice(k, d)
-                               if err != nil {
-                                       continue
+                               if inst.Type() == instance.TypeContainer {
+                                       d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+                                       if err != nil {
+                                               continue
+                                       }
                                }
 
                                // Record the MAC
@@ -740,21 +743,21 @@ func networkLeasesGet(d *Daemon, r *http.Request) Response {
                                // Add the lease
                                if d["ipv4.address"] != "" {
                                        leases = append(leases, api.NetworkLease{
-                                               Hostname: c.Name(),
+                                               Hostname: inst.Name(),
                                                Address:  d["ipv4.address"],
                                                Hwaddr:   d["hwaddr"],
                                                Type:     "static",
-                                               Location: c.Location(),
+                                               Location: inst.Location(),
                                        })
                                }
 
                                if d["ipv6.address"] != "" {
                                        leases = append(leases, api.NetworkLease{
-                                               Hostname: c.Name(),
+                                               Hostname: inst.Name(),
                                                Address:  d["ipv6.address"],
                                                Hwaddr:   d["hwaddr"],
                                                Type:     "static",
-                                               Location: c.Location(),
+                                               Location: inst.Location(),
                                        })
                                }
                        }
@@ -956,14 +959,14 @@ func (n *network) IsRunning() bool {
 }
 
 func (n *network) IsUsed() bool {
-       // Look for containers using the interface
-       cts, err := containerLoadFromAllProjects(n.state)
+       // Look for instances using the interface
+       insts, err := instanceLoadFromAllProjects(n.state)
        if err != nil {
                return true
        }
 
-       for _, c := range cts {
-               if networkIsInUse(c, n.name) {
+       for _, inst := range insts {
+               if networkIsInUse(inst, n.name) {
                        return true
                }
        }
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 299d65c8cd..f6a939aef2 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -25,6 +25,7 @@ import (
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/device"
        "github.com/lxc/lxd/lxd/dnsmasq"
+       "github.com/lxc/lxd/lxd/instance"
        "github.com/lxc/lxd/lxd/project"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/shared"
@@ -89,7 +90,7 @@ func networkGetInterfaces(cluster *db.Cluster) ([]string, error) {
        return networks, nil
 }
 
-func networkIsInUse(c container, name string) bool {
+func networkIsInUse(c Instance, name string) bool {
        for _, d := range c.ExpandedDevices() {
                if d["type"] != "nic" {
                        continue
@@ -637,26 +638,28 @@ func networkUpdateStatic(s *state.State, networkName string) error {
                networks = []string{networkName}
        }
 
-       // Get all the containers
-       containers, err := containerLoadNodeAll(s)
+       // Get all the instances
+       insts, err := instanceLoadNodeAll(s)
        if err != nil {
                return err
        }
 
        // Build a list of dhcp host entries
        entries := map[string][][]string{}
-       for _, c := range containers {
+       for _, inst := range insts {
                // Go through all its devices (including profiles
-               for k, d := range c.ExpandedDevices() {
+               for k, d := range inst.ExpandedDevices() {
                        // Skip uninteresting entries
                        if d["type"] != "nic" || d["nictype"] != "bridged" || 
!shared.StringInSlice(d["parent"], networks) {
                                continue
                        }
 
-                       // Fill in the hwaddr from volatile
-                       d, err = c.(*containerLXC).fillNetworkDevice(k, d)
-                       if err != nil {
-                               continue
+                       if inst.Type() == instance.TypeContainer {
+                               // Fill in the hwaddr from volatile
+                               d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+                               if err != nil {
+                                       continue
+                               }
                        }
 
                        // Add the new host entries
@@ -666,7 +669,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
                        }
 
                        if (shared.IsTrue(d["security.ipv4_filtering"]) && 
d["ipv4.address"] == "") || (shared.IsTrue(d["security.ipv6_filtering"]) && 
d["ipv6.address"] == "") {
-                               curIPv4, curIPv6, err := 
dnsmasq.DHCPStaticIPs(d["parent"], c.Name())
+                               curIPv4, curIPv6, err := 
dnsmasq.DHCPStaticIPs(d["parent"], inst.Name())
                                if err != nil && !os.IsNotExist(err) {
                                        return err
                                }
@@ -680,7 +683,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
                                }
                        }
 
-                       entries[d["parent"]] = append(entries[d["parent"]], []string{d["hwaddr"], c.Project(), c.Name(), d["ipv4.address"], d["ipv6.address"]})
+                       entries[d["parent"]] = append(entries[d["parent"]], []string{d["hwaddr"], inst.Project(), inst.Name(), d["ipv4.address"], d["ipv6.address"]})
                }
        }
 
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 2c2f3d63d7..b25a1288d9 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -238,20 +238,20 @@ func storagePoolVolumeSnapshotUpdate(state *state.State, poolName string, volume
 }
 
 func storagePoolVolumeUsedByContainersGet(s *state.State, project, poolName string, volumeName string) ([]string, error) {
-       cts, err := containerLoadByProject(s, project)
+       insts, err := instanceLoadByProject(s, project)
        if err != nil {
                return []string{}, err
        }
 
        ctsUsingVolume := []string{}
-       for _, c := range cts {
-               for _, dev := range c.LocalDevices() {
+       for _, inst := range insts {
+               for _, dev := range inst.LocalDevices() {
                        if dev["type"] != "disk" {
                                continue
                        }
 
                        if dev["pool"] == poolName && dev["source"] == 
volumeName {
-                               ctsUsingVolume = append(ctsUsingVolume, c.Name())
+                               ctsUsingVolume = append(ctsUsingVolume, inst.Name())
                                break
                        }
                }
@@ -264,14 +264,14 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
        oldVolumeName string, newPoolName string, newVolumeName string) error {
 
        s := d.State()
-       // update all containers
-       cts, err := containerLoadAll(s)
+       // update all instances
+       insts, err := instanceLoadAll(s)
        if err != nil {
                return err
        }
 
-       for _, c := range cts {
-               devices := c.LocalDevices()
+       for _, inst := range insts {
+               devices := inst.LocalDevices()
                for k := range devices {
                        if devices[k]["type"] != "disk" {
                                continue
@@ -298,7 +298,6 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
                        }
 
                        // found entry
-
                        if oldPoolName != newPoolName {
                                devices[k]["pool"] = newPoolName
                        }
@@ -313,18 +312,18 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
                }
 
                args := db.ContainerArgs{
-                       Architecture: c.Architecture(),
-                       Description:  c.Description(),
-                       Config:       c.LocalConfig(),
+                       Architecture: inst.Architecture(),
+                       Description:  inst.Description(),
+                       Config:       inst.LocalConfig(),
                        Devices:      devices,
-                       Ephemeral:    c.IsEphemeral(),
-                       Profiles:     c.Profiles(),
-                       Project:      c.Project(),
-                       Type:         c.Type(),
-                       Snapshot:     c.IsSnapshot(),
+                       Ephemeral:    inst.IsEphemeral(),
+                       Profiles:     inst.Profiles(),
+                       Project:      inst.Project(),
+                       Type:         inst.Type(),
+                       Snapshot:     inst.IsSnapshot(),
                }
 
-               err = c.Update(args, false)
+               err = inst.Update(args, false)
                if err != nil {
                        return err
                }
@@ -398,19 +397,19 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
 func storagePoolVolumeUsedByRunningContainersWithProfilesGet(s *state.State,
        poolName string, volumeName string, volumeTypeName string,
        runningOnly bool) ([]string, error) {
-       cts, err := containerLoadAll(s)
+       insts, err := instanceLoadAll(s)
        if err != nil {
                return []string{}, err
        }
 
        ctsUsingVolume := []string{}
        volumeNameWithType := fmt.Sprintf("%s/%s", volumeTypeName, volumeName)
-       for _, c := range cts {
-               if runningOnly && !c.IsRunning() {
+       for _, inst := range insts {
+               if runningOnly && !inst.IsRunning() {
                        continue
                }
 
-               for _, dev := range c.ExpandedDevices() {
+               for _, dev := range inst.ExpandedDevices() {
                        if dev["type"] != "disk" {
                                continue
                        }
@@ -423,7 +422,7 @@ func storagePoolVolumeUsedByRunningContainersWithProfilesGet(s *state.State,
                        // "container////bla" but only against "container/bla".
                        cleanSource := filepath.Clean(dev["source"])
                        if cleanSource == volumeName || cleanSource == volumeNameWithType {
-                               ctsUsingVolume = append(ctsUsingVolume, c.Name())
+                               ctsUsingVolume = append(ctsUsingVolume, inst.Name())
                        }
                }
        }

From 151dc59339feb97aeff14be1a15a6525b7a45941 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Sep 2019 09:07:14 +0100
Subject: [PATCH 2/6] lxd/storage/btrfs: Fixes bug with BTRFS snapshot copy

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage_btrfs.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index a47205b80a..e16d7a2ac2 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -1178,7 +1178,7 @@ func (s *storageBtrfs) ContainerCopy(target Instance, source Instance, container
                        return err
                }
 
-               err = s.copySnapshot(sourceSnapshot, targetSnapshot)
+               err = s.copySnapshot(targetSnapshot, sourceSnapshot)
                if err != nil {
                        return err
                }
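For context on this one-liner: the hunk header shows ContainerCopy takes its
target first, and the fix brings the copySnapshot call in line with that
convention so the snapshot is copied in the right direction. Assuming the
signature is copySnapshot(target Instance, source Instance) error (its
definition sits outside this diff):

    // Before: operands swapped, so destination and source were reversed.
    err = s.copySnapshot(sourceSnapshot, targetSnapshot)

    // After: target first, matching ContainerCopy(target, source, ...).
    err = s.copySnapshot(targetSnapshot, sourceSnapshot)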

From 818daadbf6e48b8749706e6aec3e096c51103776 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Sep 2019 10:17:50 +0100
Subject: [PATCH 3/6] lxd/vm/qemu: Adds qemu virtual machine base
 implementation of Instance interface

- Had to define it in the main package because it relies on storage and operation types.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/vm_qemu.go | 337 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 337 insertions(+)
 create mode 100644 lxd/vm_qemu.go

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
new file mode 100644
index 0000000000..ecbf328bed
--- /dev/null
+++ b/lxd/vm_qemu.go
@@ -0,0 +1,337 @@
+package main
+
+import (
+       "fmt"
+       "io"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "time"
+
+       "github.com/lxc/lxd/lxd/db"
+       "github.com/lxc/lxd/lxd/device"
+       deviceConfig "github.com/lxc/lxd/lxd/device/config"
+       "github.com/lxc/lxd/lxd/instance"
+       "github.com/lxc/lxd/lxd/project"
+       "github.com/lxc/lxd/lxd/state"
+       driver "github.com/lxc/lxd/lxd/storage"
+       "github.com/lxc/lxd/shared"
+       "github.com/lxc/lxd/shared/api"
+)
+
+// The QEMU virtual machine driver.
+type vmQemu struct {
+       // Properties
+       architecture int
+       dbType       instance.Type
+       snapshot     bool
+       creationDate time.Time
+       lastUsedDate time.Time
+       ephemeral    bool
+       id           int
+       project      string
+       name         string
+       description  string
+       stateful     bool
+
+       // Config
+       expandedConfig  map[string]string
+       expandedDevices deviceConfig.Devices
+       localConfig     map[string]string
+       localDevices    deviceConfig.Devices
+       profiles        []string
+
+       state *state.State
+
+       // Storage
+       storage storage
+
+       // Clustering
+       node string
+
+       // Progress tracking
+       op *operation
+
+       expiryDate time.Time
+}
+
+func (q *vmQemu) Freeze() error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Shutdown(timeout time.Duration) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Start(stateful bool) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Stop(stateful bool) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Unfreeze() error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) IsPrivileged() bool {
+       return shared.IsTrue(q.expandedConfig["security.privileged"])
+}
+
+func (q *vmQemu) Restore(source Instance, stateful bool) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Snapshots() ([]Instance, error) {
+       return nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Backups() ([]backup, error) {
+       return nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Rename(newName string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Update(args db.ContainerArgs, userRequested bool) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Delete() error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Export(w io.Writer, properties map[string]string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) CGroupGet(key string) (string, error) {
+       return "", fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) CGroupSet(key string, value string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) VolatileSet(changes map[string]string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) FileExists(path string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
+       return 0, 0, 0, "", nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) FileRemove(path string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Console(terminal *os.File) *exec.Cmd {
+       return nil
+}
+
+func (q *vmQemu) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error) {
+       return nil, 0, 0, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Render() (interface{}, interface{}, error) {
+       return nil, nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
+       return nil, nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) RenderState() (*api.InstanceState, error) {
+       return nil, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) IsRunning() bool {
+       state := q.State()
+       return state != "BROKEN" && state != "STOPPED"
+}
+
+func (q *vmQemu) IsFrozen() bool {
+       return q.State() == "FROZEN"
+}
+
+func (q *vmQemu) IsEphemeral() bool {
+       return q.ephemeral
+}
+
+func (q *vmQemu) IsSnapshot() bool {
+       return q.snapshot
+}
+
+func (q *vmQemu) IsStateful() bool {
+       return q.stateful
+}
+
+func (q *vmQemu) DeviceEventHandler(runConf *device.RunConfig) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Id() int {
+       return q.id
+}
+
+func (q *vmQemu) Location() string {
+       return q.node
+}
+
+func (q *vmQemu) Project() string {
+       return q.project
+}
+
+func (q *vmQemu) Name() string {
+       return q.name
+}
+
+func (q *vmQemu) Type() instance.Type {
+       return q.dbType
+}
+
+func (q *vmQemu) Description() string {
+       return q.description
+}
+
+func (q *vmQemu) Architecture() int {
+       return q.architecture
+}
+
+func (q *vmQemu) CreationDate() time.Time {
+       return q.creationDate
+}
+
+func (q *vmQemu) LastUsedDate() time.Time {
+       return q.lastUsedDate
+}
+
+func (q *vmQemu) ExpandedConfig() map[string]string {
+       return q.expandedConfig
+}
+
+func (q *vmQemu) ExpandedDevices() deviceConfig.Devices {
+       return q.expandedDevices
+}
+
+func (q *vmQemu) LocalConfig() map[string]string {
+       return q.localConfig
+}
+
+func (q *vmQemu) LocalDevices() deviceConfig.Devices {
+       return q.localDevices
+}
+
+func (q *vmQemu) Profiles() []string {
+       return q.profiles
+}
+
+func (q *vmQemu) InitPID() int {
+       return -1
+}
+
+func (q *vmQemu) State() string {
+       return ""
+}
+
+func (q *vmQemu) ExpiryDate() time.Time {
+       if q.IsSnapshot() {
+               return q.expiryDate
+       }
+
+       // Return zero time if the instance is not a snapshot
+       return time.Time{}
+}
+
+func (q *vmQemu) Path() string {
+       name := project.Prefix(q.Project(), q.Name())
+       return driver.ContainerPath(name, q.IsSnapshot())
+}
+
+func (q *vmQemu) DevicesPath() string {
+       name := project.Prefix(q.Project(), q.Name())
+       return shared.VarPath("devices", name)
+}
+
+func (q *vmQemu) ShmountsPath() string {
+       name := project.Prefix(q.Project(), q.Name())
+       return shared.VarPath("shmounts", name)
+}
+
+func (q *vmQemu) LogPath() string {
+       name := project.Prefix(q.Project(), q.Name())
+       return shared.LogPath(name)
+}
+
+func (q *vmQemu) LogFilePath() string {
+       return filepath.Join(q.LogPath(), "lxq.log")
+}
+
+func (q *vmQemu) ConsoleBufferLogPath() string {
+       return filepath.Join(q.LogPath(), "console.log")
+}
+
+func (q *vmQemu) RootfsPath() string {
+       return filepath.Join(q.Path(), "rootfs")
+}
+
+func (q *vmQemu) TemplatesPath() string {
+       return filepath.Join(q.Path(), "templates")
+}
+
+func (q *vmQemu) StatePath() string {
+       return filepath.Join(q.Path(), "state")
+}
+
+func (q *vmQemu) StoragePool() (string, error) {
+       poolName, err := q.state.Cluster.ContainerPool(q.Project(), q.Name())
+       if err != nil {
+               return "", err
+       }
+
+       return poolName, nil
+}
+
+func (q *vmQemu) SetOperation(op *operation) {
+       q.op = op
+}
+
+func (q *vmQemu) StorageStart() (bool, error) {
+       return false, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) StorageStop() (bool, error) {
+       return false, fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) Storage() storage {
+       return nil
+}
+
+func (q *vmQemu) TemplateApply(trigger string) error {
+       return fmt.Errorf("Not implemented")
+}
+
+func (q *vmQemu) DaemonState() *state.State {
+       // FIXME: This function should go away, since the abstract container
+       //        interface should not be coupled with internal state details.
+       //        However this is not currently possible, because many
+       //        higher-level APIs use container variables as "implicit
+       //        handles" to database/OS state and then need a way to get a
+       //        reference to it.
+       return q.state
+}
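Since every method above is a stub, a cheap way to keep the struct from silently
drifting out of sync with the Instance interface as methods are added would be a
compile-time assertion (a reviewer suggestion, not part of this patch):

    // Fails to compile as soon as *vmQemu stops satisfying Instance.
    var _ Instance = (*vmQemu)(nil)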

From 2ae062e22891d5f651caff610918e413d2e1cec6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Sep 2019 10:40:17 +0100
Subject: [PATCH 4/6] lxd/vm/qemu: Adds vmQemuLoad and vmQemuInstantiate

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/vm_qemu.go | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index ecbf328bed..2a048a584c 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -19,6 +19,62 @@ import (
        "github.com/lxc/lxd/shared/api"
 )
 
+func vmQemuLoad(s *state.State, args db.ContainerArgs, profiles []api.Profile) (Instance, error) {
+       // Create the virtual machine struct
+       q := vmQemuInstantiate(s, args)
+
+       // Expand config and devices
+       err := q.expandConfig(profiles)
+       if err != nil {
+               return nil, err
+       }
+
+       err = q.expandDevices(profiles)
+       if err != nil {
+               return nil, err
+       }
+
+       return q, nil
+}
+
+// vmQemuInstantiate creates a vmQemu struct without initializing it.
+func vmQemuInstantiate(s *state.State, args db.ContainerArgs) *vmQemu {
+       q := &vmQemu{
+               state:        s,
+               id:           args.ID,
+               project:      args.Project,
+               name:         args.Name,
+               description:  args.Description,
+               ephemeral:    args.Ephemeral,
+               architecture: args.Architecture,
+               dbType:       args.Type,
+               snapshot:     args.Snapshot,
+               creationDate: args.CreationDate,
+               lastUsedDate: args.LastUsedDate,
+               profiles:     args.Profiles,
+               localConfig:  args.Config,
+               localDevices: args.Devices,
+               stateful:     args.Stateful,
+               node:         args.Node,
+               expiryDate:   args.ExpiryDate,
+       }
+
+       // Clean up the zero values
+       if q.expiryDate.IsZero() {
+               q.expiryDate = time.Time{}
+       }
+
+       if q.creationDate.IsZero() {
+               q.creationDate = time.Time{}
+       }
+
+       if q.lastUsedDate.IsZero() {
+               q.lastUsedDate = time.Time{}
+       }
+
+       return q
+}
+
 // The QEMU virtual machine driver.
 type vmQemu struct {
        // Properties
@@ -218,6 +274,34 @@ func (q *vmQemu) LastUsedDate() time.Time {
        return q.lastUsedDate
 }
 
+func (q *vmQemu) expandConfig(profiles []api.Profile) error {
+       if profiles == nil && len(q.profiles) > 0 {
+               var err error
+               profiles, err = q.state.Cluster.ProfilesGet(q.project, q.profiles)
+               if err != nil {
+                       return err
+               }
+       }
+
+       q.expandedConfig = db.ProfilesExpandConfig(q.localConfig, profiles)
+
+       return nil
+}
+
+func (q *vmQemu) expandDevices(profiles []api.Profile) error {
+       if profiles == nil && len(q.profiles) > 0 {
+               var err error
+               profiles, err = q.state.Cluster.ProfilesGet(q.project, q.profiles)
+               if err != nil {
+                       return err
+               }
+       }
+
+       q.expandedDevices = db.ProfilesExpandDevices(q.localDevices, profiles)
+
+       return nil
+}
+
 func (q *vmQemu) ExpandedConfig() map[string]string {
        return q.expandedConfig
 }
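Usage-wise, vmQemuLoad is symmetric with containerLXCLoad: pass profiles if the
caller already has them, or nil to have expandConfig/expandDevices resolve them
from the cluster database. A hypothetical call site (args would come from
db.ContainerToArgs; the config key is only an example):

    inst, err := vmQemuLoad(s, args, nil)
    if err != nil {
            return nil, err
    }

    // Expanded config is the local config layered over the profile configs.
    fmt.Println(inst.ExpandedConfig()["limits.memory"])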

From 093a7a0b411f90406765f17ffb168806954d2f28 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Sep 2019 10:57:17 +0100
Subject: [PATCH 5/6] lxd/container: Adds instanceInstantiate function

The instanceInstantiate function creates the correct underlying struct based on the instance type and returns it as an Instance.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container.go | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/lxd/container.go b/lxd/container.go
index 45c7687dba..35aefe5a67 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -1142,6 +1142,26 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
        return instances, nil
 }
 
+// instanceInstantiate creates the underlying instance type struct and returns it as an Instance.
+func instanceInstantiate(s *state.State, args db.ContainerArgs, cProfiles []api.Profile) (Instance, error) {
+       var inst Instance
+       var err error
+
+       if args.Type == instance.TypeContainer {
+               inst, err = containerLXCLoad(s, args, cProfiles)
+       } else if args.Type == instance.TypeVM {
+               inst, err = vmQemuLoad(s, args, cProfiles)
+       } else {
+               return nil, fmt.Errorf("Invalid instance type for instance %s", args.Name)
+       }
+
+       if err != nil {
+               return nil, err
+       }
+
+       return inst, nil
+}
+
 func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []Instance, error) {
        // Get the source snapshots
        sourceSnapshots, err := source.Snapshots()

From eff91d441abb7765dc842aa484a98daf00371fff Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Sep 2019 10:58:22 +0100
Subject: [PATCH 6/6] lxd: Replaces use of containerLXCLoad with
 instanceInstantiate

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container.go       | 20 +++++++-------------
 lxd/containers.go      |  4 ++--
 lxd/containers_post.go |  4 ++--
 3 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 35aefe5a67..9d1e384982 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -985,13 +985,12 @@ func instanceLoadByProjectAndName(s *state.State, project, name string) (Instanc
        }
 
        args := db.ContainerToArgs(container)
-
-       c, err := containerLXCLoad(s, args, nil)
+       inst, err := instanceInstantiate(s, args, nil)
        if err != nil {
                return nil, errors.Wrap(err, "Failed to load container")
        }
 
-       return c, nil
+       return inst, nil
 }
 
 func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
@@ -1125,18 +1124,13 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
                        cProfiles = append(cProfiles, profiles[dbInstance.Project][name])
                }
 
-               if dbInstance.Type == instance.TypeContainer {
-                       args := db.ContainerToArgs(&dbInstance)
-                       ct, err := containerLXCLoad(s, args, cProfiles)
-                       if err != nil {
-                               return nil, err
-                       }
-                       instances = append(instances, ct)
-               } else {
-                       // TODO add virtual machine load here.
-                       continue
+               args := db.ContainerToArgs(&dbInstance)
+               inst, err := instanceInstantiate(s, args, cProfiles)
+               if err != nil {
+                       return nil, err
                }
 
+               instances = append(instances, inst)
        }
 
        return instances, nil
diff --git a/lxd/containers.go b/lxd/containers.go
index a340642a00..bf5e682067 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -278,7 +278,7 @@ func containersShutdown(s *state.State) error {
 
                for project, names := range cnames {
                        for _, name := range names {
-                               c, err := containerLXCLoad(s, db.ContainerArgs{
+                               inst, err := instanceInstantiate(s, db.ContainerArgs{
                                        Project: project,
                                        Name:    name,
                                        Config:  make(map[string]string),
@@ -287,7 +287,7 @@ func containersShutdown(s *state.State) error {
                                        return err
                                }
 
-                               instances = append(instances, c)
+                               instances = append(instances, inst)
                        }
                }
        }
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 822af02534..d6720b64ce 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -321,12 +321,12 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
                        }
                } else {
                        // Retrieve the future storage pool
-                       cM, err := containerLXCLoad(d.State(), args, nil)
+                       inst, err := instanceInstantiate(d.State(), args, nil)
                        if err != nil {
                                return InternalError(err)
                        }
 
-                       _, rootDiskDevice, err := shared.GetRootDiskDevice(cM.ExpandedDevices().CloneNative())
+                       _, rootDiskDevice, err := shared.GetRootDiskDevice(inst.ExpandedDevices().CloneNative())
                        if err != nil {
                                return InternalError(err)
                        }