The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6092

This e-mail was sent by the LXC bot, direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===

From c825df0c4371c5827a0921d68bca435c2d825437 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanay...@canonical.com>
Date: Fri, 16 Aug 2019 09:48:46 +0200
Subject: [PATCH 1/4] Use query.Transaction instead of manual tx management

Signed-off-by: Free Ekanayaka <free.ekanay...@canonical.com>
---
 lxd/db/cluster/open.go | 47 +++++++++++++++++++-----------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index 7d0aef3c25..e580e13de1 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -6,7 +6,7 @@ import (
        "path/filepath"
        "sync/atomic"
 
-       "github.com/canonical/go-dqlite"
+       dqlite "github.com/canonical/go-dqlite"
        "github.com/lxc/lxd/lxd/db/query"
        "github.com/lxc/lxd/lxd/db/schema"
        "github.com/lxc/lxd/lxd/util"
@@ -165,42 +165,37 @@ func EnsureSchema(db *sql.DB, address string, dir string) (bool, error) {
        // 1. This is needed for referential integrity with other tables. Also,
        // create a default profile.
        if initial == 0 {
-               tx, err := db.Begin()
-               if err != nil {
-                       return false, err
-               }
-               stmt := `
+               err = query.Transaction(db, func(tx *sql.Tx) error {
+                       stmt := `
 INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '0.0.0.0', ?, ?)
 `
-               _, err = tx.Exec(stmt, SchemaVersion, apiExtensions)
-               if err != nil {
-                       tx.Rollback()
-                       return false, err
-               }
+                       _, err = tx.Exec(stmt, SchemaVersion, apiExtensions)
+                       if err != nil {
+                               return err
+                       }
 
-               // Default project
-               stmt = `
+                       // Default project
+                       stmt = `
 INSERT INTO projects (name, description) VALUES ('default', 'Default LXD 
project');
 INSERT INTO projects_config (project_id, key, value) VALUES (1, 
'features.images', 'true');
 INSERT INTO projects_config (project_id, key, value) VALUES (1, 
'features.profiles', 'true');
 `
-               _, err = tx.Exec(stmt)
-               if err != nil {
-                       tx.Rollback()
-                       return false, err
-               }
+                       _, err = tx.Exec(stmt)
+                       if err != nil {
+                               return err
+                       }
 
-               // Default profile
-               stmt = `
+                       // Default profile
+                       stmt = `
 INSERT INTO profiles (name, description, project_id) VALUES ('default', 
'Default LXD profile', 1)
 `
-               _, err = tx.Exec(stmt)
-               if err != nil {
-                       tx.Rollback()
-                       return false, err
-               }
+                       _, err = tx.Exec(stmt)
+                       if err != nil {
+                               return err
+                       }
 
-               err = tx.Commit()
+                       return nil
+               })
                if err != nil {
                        return false, err
                }

From 020f1c406900383a54e4f144d5d2bc7ffb2cae25 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanay...@canonical.com>
Date: Fri, 16 Aug 2019 10:12:59 +0200
Subject: [PATCH 2/4] Add copy of cluster schema version 14

Signed-off-by: Free Ekanayaka <free.ekanay...@canonical.com>
---
 lxd/db/migration.go | 406 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 406 insertions(+)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 2f16ec644b..9becccf8f1 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -296,3 +296,409 @@ var preClusteringTables = []string{
        "storage_volumes",
        "storage_volumes_config",
 }
+
+// Copy of version 14 of the clustering schema. The data migration code from
+// LXD 2.0 is meant to be run against this schema. Further schema changes are
+// applied using the normal schema update logic.
+var clusterSchemaVersion14 = `
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    type INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
+CREATE TABLE "containers" (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    node_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    project_id INTEGER NOT NULL,
+    expiry_date DATETIME,
+    UNIQUE (project_id, name),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE,
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE TABLE containers_backups (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    container_only INTEGER NOT NULL default 0,
+    optimized_storage INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE VIEW containers_config_ref (project,
+    node,
+    name,
+    key,
+    value) AS
+   SELECT projects.name,
+    nodes.name,
+    containers.name,
+    containers_config.key,
+    containers_config.value
+     FROM containers_config
+       JOIN containers ON containers.id=containers_config.container_id
+       JOIN projects ON projects.id=containers.project_id
+       JOIN nodes ON nodes.id=containers.node_id;
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON 
DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE VIEW containers_devices_ref (project,
+    node,
+    name,
+    device,
+    type,
+    key,
+    value) AS
+   SELECT projects.name,
+    nodes.name,
+    containers.name,
+          containers_devices.name,
+    containers_devices.type,
+          coalesce(containers_devices_config.key,
+    ''),
+    coalesce(containers_devices_config.value,
+    '')
+   FROM containers_devices
+     LEFT OUTER JOIN containers_devices_config ON 
containers_devices_config.container_device_id=containers_devices.id
+     JOIN containers ON containers.id=containers_devices.container_id
+     JOIN projects ON projects.id=containers.project_id
+     JOIN nodes ON nodes.id=containers.node_id;
+CREATE INDEX containers_node_id_idx ON containers (node_id);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE VIEW containers_profiles_ref (project,
+    node,
+    name,
+    value) AS
+   SELECT projects.name,
+    nodes.name,
+    containers.name,
+    profiles.name
+     FROM containers_profiles
+       JOIN containers ON containers.id=containers_profiles.container_id
+       JOIN profiles ON profiles.id=containers_profiles.profile_id
+       JOIN projects ON projects.id=containers.project_id
+       JOIN nodes ON nodes.id=containers.node_id
+     ORDER BY containers_profiles.apply_order;
+CREATE INDEX containers_project_id_and_name_idx ON containers (project_id,
+    name);
+CREATE INDEX containers_project_id_and_node_id_and_name_idx ON containers 
(project_id,
+    node_id,
+    name);
+CREATE INDEX containers_project_id_and_node_id_idx ON containers (project_id,
+    node_id);
+CREATE INDEX containers_project_id_idx ON containers (project_id);
+CREATE TABLE "images" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    filename TEXT NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    project_id INTEGER NOT NULL,
+    UNIQUE (project_id, fingerprint),
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    project_id INTEGER NOT NULL,
+    UNIQUE (project_id, name),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE INDEX images_aliases_project_id_idx ON images_aliases (project_id);
+CREATE TABLE images_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (image_id, node_id),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE INDEX images_project_id_idx ON images (project_id);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias TEXT NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, node_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE networks_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (network_id, node_id),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE nodes (
+    id INTEGER PRIMARY KEY,
+    name TEXT NOT NULL,
+    description TEXT DEFAULT '',
+    address TEXT NOT NULL,
+    schema INTEGER NOT NULL,
+    api_extensions INTEGER NOT NULL,
+    heartbeat DATETIME DEFAULT CURRENT_TIMESTAMP,
+    pending INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (name),
+    UNIQUE (address)
+);
+CREATE TABLE "operations" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    uuid TEXT NOT NULL,
+    node_id TEXT NOT NULL,
+    type INTEGER NOT NULL DEFAULT 0,
+    project_id INTEGER,
+    UNIQUE (uuid),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE,
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE TABLE "profiles" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    project_id INTEGER NOT NULL,
+    UNIQUE (project_id, name),
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE VIEW profiles_config_ref (project,
+    name,
+    key,
+    value) AS
+   SELECT projects.name,
+    profiles.name,
+    profiles_config.key,
+    profiles_config.value
+     FROM profiles_config
+     JOIN profiles ON profiles.id=profiles_config.profile_id
+     JOIN projects ON projects.id=profiles.project_id;
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE 
CASCADE
+);
+CREATE VIEW profiles_devices_ref (project,
+    name,
+    device,
+    type,
+    key,
+    value) AS
+   SELECT projects.name,
+    profiles.name,
+          profiles_devices.name,
+    profiles_devices.type,
+          coalesce(profiles_devices_config.key,
+    ''),
+    coalesce(profiles_devices_config.value,
+    '')
+   FROM profiles_devices
+     LEFT OUTER JOIN profiles_devices_config ON 
profiles_devices_config.profile_device_id=profiles_devices.id
+     JOIN profiles ON profiles.id=profiles_devices.profile_id
+     JOIN projects ON projects.id=profiles.project_id;
+CREATE INDEX profiles_project_id_idx ON profiles (project_id);
+CREATE VIEW profiles_used_by_ref (project,
+    name,
+    value) AS
+  SELECT projects.name,
+    profiles.name,
+    printf('/1.0/containers/%s?project=%s',
+    containers.name,
+    containers_projects.name)
+    FROM profiles
+    JOIN projects ON projects.id=profiles.project_id
+    JOIN containers_profiles
+      ON containers_profiles.profile_id=profiles.id
+    JOIN containers
+      ON containers.id=containers_profiles.container_id
+    JOIN projects AS containers_projects
+      ON containers_projects.id=containers.project_id;
+CREATE TABLE projects (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE projects_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    project_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE,
+    UNIQUE (project_id, key)
+);
+CREATE VIEW projects_config_ref (name,
+    key,
+    value) AS
+   SELECT projects.name,
+    projects_config.key,
+    projects_config.value
+     FROM projects_config
+     JOIN projects ON projects.id=projects_config.project_id;
+CREATE VIEW projects_used_by_ref (name,
+    value) AS
+  SELECT projects.name,
+    printf('/1.0/containers/%s?project=%s',
+    containers.name,
+    projects.name)
+    FROM containers JOIN projects ON project_id=projects.id UNION
+  SELECT projects.name,
+    printf('/1.0/images/%s',
+    images.fingerprint)
+    FROM images JOIN projects ON project_id=projects.id UNION
+  SELECT projects.name,
+    printf('/1.0/profiles/%s?project=%s',
+    profiles.name,
+    projects.name)
+    FROM profiles JOIN projects ON project_id=projects.id;
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    driver TEXT NOT NULL,
+    description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, node_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE 
CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_pools_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE 
CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE "storage_volumes" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    snapshot INTEGER NOT NULL DEFAULT 0,
+    project_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id, project_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE 
CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE,
+    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE 
CASCADE
+);
+
+INSERT INTO schema (version, updated_at) VALUES (14, strftime("%s"))
+`

From 368a786154806785ea7a6f02446c43e8a01f11a4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanay...@canonical.com>
Date: Fri, 16 Aug 2019 10:18:03 +0200
Subject: [PATCH 3/4] Add Dump parameter to db.OpenCluster()

Signed-off-by: Free Ekanayaka <free.ekanay...@canonical.com>
---
 lxd/api_cluster.go             | 1 +
 lxd/cluster/heartbeat_test.go  | 3 ++-
 lxd/cluster/membership_test.go | 8 +++++---
 lxd/daemon.go                  | 2 +-
 lxd/db/db.go                   | 4 +++-
 lxd/db/testing.go              | 2 +-
 6 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 21c133c40a..b79b18de65 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -654,6 +654,7 @@ func clusterPutDisable(d *Daemon) Response {
        d.cluster, err = db.OpenCluster(
                "db.bin", store, address, "/unused/db/dir",
                d.config.DqliteSetupTimeout,
+               nil,
                dqlite.WithDialFunc(d.gateway.DialFunc()),
                dqlite.WithContext(d.gateway.Context()),
        )
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index cbfc1b1640..f14ce62de9 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -254,7 +254,8 @@ func (f *heartbeatFixture) node() (*state.State, 
*cluster.Gateway, string) {
        store := gateway.ServerStore()
        dial := gateway.DialFunc()
        state.Cluster, err = db.OpenCluster(
-               "db.bin", store, address, "/unused/db/dir", 5*time.Second, 
dqlite.WithDialFunc(dial))
+               "db.bin", store, address, "/unused/db/dir", 5*time.Second, nil,
+               dqlite.WithDialFunc(dial))
        require.NoError(f.t, err)
 
        f.gateways[len(f.gateways)] = gateway
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 6c3aff3f40..97e9c79e05 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -260,7 +260,7 @@ func TestJoin(t *testing.T) {
        var err error
        targetState.Cluster, err = db.OpenCluster(
                "db.bin", targetStore, targetAddress, "/unused/db/dir",
-               10*time.Second,
+               10*time.Second, nil,
                dqlite.WithDialFunc(targetDialFunc))
        require.NoError(t, err)
 
@@ -297,7 +297,8 @@ func TestJoin(t *testing.T) {
        dialFunc := gateway.DialFunc()
 
        state.Cluster, err = db.OpenCluster(
-               "db.bin", store, address, "/unused/db/dir", 5*time.Second, 
dqlite.WithDialFunc(dialFunc))
+               "db.bin", store, address, "/unused/db/dir", 5*time.Second, nil,
+               dqlite.WithDialFunc(dialFunc))
        require.NoError(t, err)
 
        f := &membershipFixtures{t: t, state: state}
@@ -380,7 +381,8 @@ func FLAKY_TestPromote(t *testing.T) {
        store := targetGateway.ServerStore()
        dialFunc := targetGateway.DialFunc()
        targetState.Cluster, err = db.OpenCluster(
-               "db.bin", store, targetAddress, "/unused/db/dir", 
5*time.Second, dqlite.WithDialFunc(dialFunc))
+               "db.bin", store, targetAddress, "/unused/db/dir", 
5*time.Second, nil,
+               dqlite.WithDialFunc(dialFunc))
        require.NoError(t, err)
        targetF := &membershipFixtures{t: t, state: targetState}
        targetF.ClusterAddress(targetAddress)
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 0aad1da9f9..58be2f8275 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -688,7 +688,7 @@ func (d *Daemon) init() error {
 
                d.cluster, err = db.OpenCluster(
                        "db.bin", store, clusterAddress, dir,
-                       d.config.DqliteSetupTimeout,
+                       d.config.DqliteSetupTimeout, dump,
                        dqlite.WithDialFunc(d.gateway.DialFunc()),
                        dqlite.WithContext(d.gateway.Context()),
                        dqlite.WithConnectionTimeout(10*time.Second),
diff --git a/lxd/db/db.go b/lxd/db/db.go
index b6633aafd6..b8137e4bb6 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -153,12 +153,14 @@ type Cluster struct {
 // - dialer: Function used to connect to the dqlite backend via gRPC SQL.
 // - address: Network address of this node (or empty string).
 // - dir: Base LXD database directory (e.g. /var/lib/lxd/database)
+// - timeout: Give up trying to open the database after this amount of time.
+// - dump: If not nil, a copy of 2.0 db data, for migrating to 3.0.
 //
 // The address and api parameters will be used to determine if the cluster
 // database matches our version, and possibly trigger a schema update. If the
 // schema update can't be performed right now, because some nodes are still
 // behind, an Upgrading error is returned.
-func OpenCluster(name string, store dqlite.ServerStore, address, dir string, timeout time.Duration, options ...dqlite.DriverOption) (*Cluster, error) {
+func OpenCluster(name string, store dqlite.ServerStore, address, dir string, timeout time.Duration, dump *Dump, options ...dqlite.DriverOption) (*Cluster, error) {
        db, err := cluster.Open(name, store, options...)
        if err != nil {
                return nil, errors.Wrap(err, "failed to open database")
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index cee1366481..71755e22de 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -63,7 +63,7 @@ func NewTestCluster(t *testing.T) (*Cluster, func()) {
        }
 
        cluster, err := OpenCluster(
-               "test.db", store, "1", "/unused/db/dir", 5*time.Second,
+               "test.db", store, "1", "/unused/db/dir", 5*time.Second, nil,
                dqlite.WithLogFunc(log), dqlite.WithDialFunc(dial))
        require.NoError(t, err)
 

From f007b1d1f8ce5afe181aa8256c2bf7523a4d7628 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanay...@canonical.com>
Date: Fri, 16 Aug 2019 10:38:20 +0200
Subject: [PATCH 4/4] Invoke data migration from db.OpenCluster, before schema
 updates

Signed-off-by: Free Ekanayaka <free.ekanay...@canonical.com>
---
 lxd/daemon.go            | 25 +-----------------------
 lxd/db/db.go             | 27 ++++++++++++++++++++++++++
 lxd/db/migration.go      | 41 +++++++++++++++++++++++++++-------------
 lxd/db/migration_test.go | 15 +++++++++++++--
 lxd/db/testing.go        | 18 +++++++++++-------
 5 files changed, 80 insertions(+), 46 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 58be2f8275..bf7ae49484 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -21,7 +21,7 @@ import (
        "github.com/gorilla/mux"
        "github.com/pkg/errors"
        "golang.org/x/sys/unix"
-       "gopkg.in/lxc/go-lxc.v2"
+       lxc "gopkg.in/lxc/go-lxc.v2"
 
        "gopkg.in/macaroon-bakery.v2/bakery"
        "gopkg.in/macaroon-bakery.v2/bakery/checkers"
@@ -728,29 +728,6 @@ func (d *Daemon) init() error {
        }
        d.gateway.Cluster = d.cluster
 
-       /* Migrate the node local data to the cluster database, if needed */
-       if dump != nil {
-               logger.Infof("Migrating data from local to global database")
-               err = d.cluster.ImportPreClusteringData(dump)
-               if err != nil {
-                       // Restore the local sqlite3 backup and wipe the raft
-                       // directory, so users can fix problems and retry.
-                       path := d.os.LocalDatabasePath()
-                       copyErr := shared.FileCopy(path+".bak", path)
-                       if copyErr != nil {
-                               // Ignore errors here, there's not much we can 
do
-                               logger.Errorf("Failed to restore local 
database: %v", copyErr)
-                       }
-                       rmErr := os.RemoveAll(d.os.GlobalDatabaseDir())
-                       if rmErr != nil {
-                               // Ignore errors here, there's not much we can 
do
-                               logger.Errorf("Failed to cleanup global 
database: %v", rmErr)
-                       }
-
-                       return fmt.Errorf("Failed to migrate data to global 
database: %v", err)
-               }
-       }
-
        // This logic used to belong to patchUpdateFromV10, but has been moved
        // here because it needs database access.
        if shared.PathExists(shared.VarPath("lxc")) {
diff --git a/lxd/db/db.go b/lxd/db/db.go
index b8137e4bb6..341982d4d9 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -3,6 +3,8 @@ package db
 import (
        "database/sql"
        "fmt"
+       "os"
+       "path/filepath"
        "sync"
        "time"
 
@@ -12,6 +14,7 @@ import (
        "github.com/lxc/lxd/lxd/db/cluster"
        "github.com/lxc/lxd/lxd/db/node"
        "github.com/lxc/lxd/lxd/db/query"
+       "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/logger"
 )
 
@@ -210,6 +213,30 @@ func OpenCluster(name string, store dqlite.ServerStore, address, dir string, tim
                }
        }
 
+       if dump != nil {
+               logger.Infof("Migrating data from local to global database")
+               err := query.Transaction(db, func(tx *sql.Tx) error {
+                       return importPreClusteringData(tx, dump)
+               })
+               if err != nil {
+                       // Restore the local sqlite3 backup and wipe the raft
+                       // directory, so users can fix problems and retry.
+                       path := filepath.Join(dir, "local.db")
+                       copyErr := shared.FileCopy(path+".bak", path)
+                       if copyErr != nil {
+                               // Ignore errors here, there's not much we can 
do
+                               logger.Errorf("Failed to restore local 
database: %v", copyErr)
+                       }
+                       rmErr := os.RemoveAll(filepath.Join(dir, "global"))
+                       if rmErr != nil {
+                               // Ignore errors here, there's not much we can 
do
+                               logger.Errorf("Failed to cleanup global 
database: %v", rmErr)
+                       }
+
+                       return nil, errors.Wrap(err, "Failed to migrate data to global database")
+               }
+       }
+
        nodesVersionsMatch, err := cluster.EnsureSchema(db, address, dir)
        if err != nil {
                return nil, errors.Wrap(err, "failed to ensure schema")
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 9becccf8f1..c213d4db75 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -102,18 +102,31 @@ var preClusteringTablesRequiringProjectID = []string{
 }
 
 // ImportPreClusteringData imports the data loaded with LoadPreClusteringData.
-func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
-       tx, err := c.db.Begin()
+func importPreClusteringData(tx *sql.Tx, dump *Dump) error {
+       // Create version 14 of the cluster database schema.
+       _, err := tx.Exec(clusterSchemaVersion14)
        if err != nil {
-               return errors.Wrap(err, "failed to start cluster database 
transaction")
+               return errors.Wrap(err, "Create cluster database schema version 
14")
        }
 
-       // Delete the default profile in the cluster database, which always
-       // gets created no matter what.
-       _, err = tx.Exec("DELETE FROM profiles WHERE id=1")
+       // Insert an entry for node 1.
+       stmt := `
+INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', 
'0.0.0.0', 14, 1)
+`
+       _, err = tx.Exec(stmt)
+       if err != nil {
+               return err
+       }
+
+       // Default project
+       stmt = `
+INSERT INTO projects (name, description) VALUES ('default', 'Default LXD 
project');
+INSERT INTO projects_config (project_id, key, value) VALUES (1, 
'features.images', 'true');
+INSERT INTO projects_config (project_id, key, value) VALUES (1, 
'features.profiles', 'true');
+`
+       _, err = tx.Exec(stmt)
        if err != nil {
-               tx.Rollback()
-               return errors.Wrap(err, "failed to delete default profile")
+               return err
        }
 
        for _, table := range preClusteringTables {
@@ -216,16 +229,13 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) 
error {
                        stmt += fmt.Sprintf(" VALUES %s", 
query.Params(len(columns)))
                        result, err := tx.Exec(stmt, row...)
                        if err != nil {
-                               tx.Rollback()
                                return errors.Wrapf(err, "failed to insert row 
%d into %s", i, table)
                        }
                        n, err := result.RowsAffected()
                        if err != nil {
-                               tx.Rollback()
                                return errors.Wrapf(err, "no result count for 
row %d of %s", i, table)
                        }
                        if n != 1 {
-                               tx.Rollback()
                                return fmt.Errorf("could not insert %d int %s", i, table)
                        }
 
@@ -237,7 +247,7 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error 
{
                }
        }
 
-       return tx.Commit()
+       return nil
 }
 
 // Insert a row in one of the nodes association tables (storage_pools_nodes,
@@ -699,6 +709,11 @@ CREATE TABLE storage_volumes_config (
     UNIQUE (storage_volume_id, key),
     FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );
-
+CREATE TABLE schema (
+    id         INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    version    INTEGER NOT NULL,
+    updated_at DATETIME NOT NULL,
+    UNIQUE (version)
+);
 INSERT INTO schema (version, updated_at) VALUES (14, strftime("%s"))
 `
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 316dd4f320..7e82e8b173 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -1,9 +1,13 @@
 package db_test
 
 import (
+       "context"
        "database/sql"
+       "net"
        "testing"
+       "time"
 
+       dqlite "github.com/canonical/go-dqlite"
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/db/query"
        "github.com/stretchr/testify/assert"
@@ -40,11 +44,18 @@ func TestImportPreClusteringData(t *testing.T) {
        dump, err := db.LoadPreClusteringData(tx)
        require.NoError(t, err)
 
-       cluster, cleanup := db.NewTestCluster(t)
+       dir, store, cleanup := db.NewTestDqliteServer(t)
        defer cleanup()
 
-       err = cluster.ImportPreClusteringData(dump)
+       dial := func(ctx context.Context, address string) (net.Conn, error) {
+               return net.Dial("unix", address)
+       }
+
+       cluster, err := db.OpenCluster(
+               "test.db", store, "1", dir, 5*time.Second, dump,
+               dqlite.WithDialFunc(dial))
        require.NoError(t, err)
+       defer cluster.Close()
 
        // certificates
        certs, err := cluster.CertificatesGet()
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 71755e22de..b027ed2b33 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -6,6 +6,7 @@ import (
        "io/ioutil"
        "net"
        "os"
+       "path/filepath"
        "testing"
        "time"
 
@@ -54,7 +55,7 @@ func NewTestNodeTx(t *testing.T) (*NodeTx, func()) {
 // that can be used to clean it up when done.
 func NewTestCluster(t *testing.T) (*Cluster, func()) {
        // Create an in-memory dqlite SQL server and associated store.
-       store, serverCleanup := newDqliteServer(t)
+       dir, store, serverCleanup := NewTestDqliteServer(t)
 
        log := newLogFunc(t)
 
@@ -63,7 +64,7 @@ func NewTestCluster(t *testing.T) (*Cluster, func()) {
        }
 
        cluster, err := OpenCluster(
-               "test.db", store, "1", "/unused/db/dir", 5*time.Second, nil,
+               "test.db", store, "1", dir, 5*time.Second, nil,
                dqlite.WithLogFunc(log), dqlite.WithDialFunc(dial))
        require.NoError(t, err)
 
@@ -95,10 +96,11 @@ func NewTestClusterTx(t *testing.T) (*ClusterTx, func()) {
        return clusterTx, cleanup
 }
 
-// Create a new in-memory dqlite server.
+// NewTestDqliteServer creates a new test dqlite server.
 //
-// Return the newly created server store can be used to connect to it.
-func newDqliteServer(t *testing.T) (*dqlite.DatabaseServerStore, func()) {
+// Return the directory backing the test server and a newly created server
+// store that can be used to connect to it.
+func NewTestDqliteServer(t *testing.T) (string, *dqlite.DatabaseServerStore, func()) {
        t.Helper()
 
        listener, err := net.Listen("unix", "")
@@ -107,9 +109,11 @@ func newDqliteServer(t *testing.T) (*dqlite.DatabaseServerStore, func()) {
        address := listener.Addr().String()
 
        dir, dirCleanup := newDir(t)
+       err = os.Mkdir(filepath.Join(dir, "global"), 0755)
+       require.NoError(t, err)
 
        info := dqlite.ServerInfo{ID: uint64(1), Address: listener.Addr().String()}
-       server, err := dqlite.NewServer(info, dir)
+       server, err := dqlite.NewServer(info, filepath.Join(dir, "global"))
        require.NoError(t, err)
 
        err = server.Bootstrap([]dqlite.ServerInfo{info})
@@ -128,7 +132,7 @@ func newDqliteServer(t *testing.T) (*dqlite.DatabaseServerStore, func()) {
        ctx := context.Background()
        require.NoError(t, store.Set(ctx, []dqlite.ServerInfo{{Address: address}}))
 
-       return store, cleanup
+       return dir, store, cleanup
 }
 
 var dqliteSerial = 0
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel

Reply via email to