This is an automated email from the ASF dual-hosted git repository.
richardantal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/master by this push:
new 81d6cb2203 PHOENIX-6870 Add cluster-wide metadata upgrade block (#1927)
81d6cb2203 is described below
commit 81d6cb22032104fda537114b93eba7106328fc93
Author: richardantal <[email protected]>
AuthorDate: Wed Aug 28 10:03:52 2024 +0200
PHOENIX-6870 Add cluster-wide metadata upgrade block (#1927)
---
.../apache/phoenix/exception/SQLExceptionCode.java | 2 +
.../phoenix/exception/UpgradeBlockedException.java | 26 +++++
.../phoenix/query/ConnectionQueryServicesImpl.java | 45 +++++++-
.../org/apache/phoenix/query/QueryServices.java | 5 +
.../apache/phoenix/end2end/BlockedUpgradeIT.java | 63 +++++++++++
...hotIT.java => LoadSystemTableSnapshotBase.java} | 126 +++++++++++++--------
.../phoenix/end2end/SkipBlockUpgradeCheckIT.java | 65 +++++++++++
.../phoenix/end2end/UpgradeFromSnapshotIT.java | 79 +++++++++++++
8 files changed, 358 insertions(+), 53 deletions(-)
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 24abfc4bb4..c060d4d20f 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -511,6 +511,8 @@ public enum SQLExceptionCode {
ROW_VALUE_CONSTRUCTOR_OFFSET_INTERNAL_ERROR(2015, "INT17", "Row Value
Constructor Offset had an Unexpected Error."),
ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_ALLOWED_IN_QUERY(2016, "INT18", "Row
Value Constructor Offset Not Allowed In Query."),
+ UPGRADE_BLOCKED(2017, "INT19", ""),
+
OPERATION_TIMED_OUT(6000, "TIM01", "Operation timed out.", new Factory() {
@Override
public SQLException newException(SQLExceptionInfo info) {
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java
new file mode 100644
index 0000000000..6313879a83
--- /dev/null
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.exception;
+
+
+public class UpgradeBlockedException extends RetriableUpgradeException {
+ public UpgradeBlockedException() {
+ super("Upgrade is BLOCKED by a SYSTEM.MUTEX row",
SQLExceptionCode.UPGRADE_BLOCKED
+ .getSQLState(),
SQLExceptionCode.UPGRADE_BLOCKED.getErrorCode());
+ }
+}
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 26254451b8..22b0596dca 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -52,6 +52,7 @@ import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCH
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_HBASE_TABLE_NAME;
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME;
@@ -158,6 +159,7 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -222,6 +224,7 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.exception.UpgradeInProgressException;
import org.apache.phoenix.exception.UpgradeNotRequiredException;
import org.apache.phoenix.exception.UpgradeRequiredException;
+import org.apache.phoenix.exception.UpgradeBlockedException;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.hbase.index.util.VersionUtil;
@@ -4447,6 +4450,7 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
metaConnection = new
PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl,
scnProps);
metaConnection.setRunningUpgrade(true);
+
// Always try to create SYSTEM.MUTEX table first since we need it
to acquire the
// upgrade mutex. Upgrade or migration is not possible without the
upgrade mutex
try (Admin admin = getAdmin()) {
@@ -4478,6 +4482,14 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
currentServerSideTableTimeStamp =
caughtTableAlreadyExistsException.getTable().getTimeStamp();
}
+
+ ReadOnlyProps readOnlyProps =
metaConnection.getQueryServices().getProps();
+ String skipUpgradeBlock =
readOnlyProps.get(SKIP_UPGRADE_BLOCK_CHECK);
+
+ if (skipUpgradeBlock == null ||
!Boolean.valueOf(skipUpgradeBlock)) {
+ checkUpgradeBlockMutex();
+ }
+
acquiredMutexLock = acquireUpgradeMutex(
MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP);
LOGGER.debug(
@@ -5125,6 +5137,31 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
}
}
+ /**
+ * Check whether a cluster-wide upgrade block is in place by looking for the
+ * BLOCK_UPGRADE marker row in the SYSTEM.MUTEX table.
+ *
+ * @return true if no upgrade block is present
+ * @throws SQLException an UpgradeBlockedException if the block row exists
+ */
+ @VisibleForTesting
+ public boolean checkUpgradeBlockMutex()
+ throws SQLException {
+ try (Table sysMutexTable = getSysMutexTable()) {
+ final byte[] rowKey = Bytes.toBytes("BLOCK_UPGRADE");
+
+ Get get = new
Get(rowKey).addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES,
SYSTEM_MUTEX_COLUMN_NAME_BYTES);
+ Result r = sysMutexTable.get(get);
+
+ if (!r.isEmpty()) {
+ throw new UpgradeBlockedException();
+ }
+ } catch (IOException e) {
+ throw ClientUtil.parseServerException(e);
+ }
+ return true;
+ }
+
/**
* Acquire distributed mutex of sorts to make sure only one JVM is able to
run the upgrade code by
* making use of HBase's checkAndPut api.
@@ -5155,13 +5192,10 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
// at this point the system mutex table should have been created or
// an exception thrown
try (Table sysMutexTable = getSysMutexTable()) {
- byte[] family =
PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
- byte[] qualifier =
PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES;
- byte[] value = MUTEX_LOCKED;
Put put = new Put(rowKey);
- put.addColumn(family, qualifier, value);
+ put.addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES,
SYSTEM_MUTEX_COLUMN_NAME_BYTES, MUTEX_LOCKED);
CheckAndMutate checkAndMutate =
CheckAndMutate.newBuilder(rowKey)
- .ifNotExists(family, qualifier)
+ .ifNotExists(SYSTEM_MUTEX_FAMILY_NAME_BYTES,
SYSTEM_MUTEX_COLUMN_NAME_BYTES)
.build(put);
boolean checkAndPut =
sysMutexTable.checkAndMutate(checkAndMutate).isSuccess();
@@ -5189,6 +5223,7 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null);
}
+
@Override
public void deleteMutexCell(String tenantId, String schemaName, String
tableName,
String columnName, String familyName) throws SQLException {
diff --git
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java
index 24c72521d8..27e2bfed6c 100644
---
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java
+++
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -442,6 +442,11 @@ public interface QueryServices extends SQLCloseable {
*/
String SKIP_SYSTEM_TABLES_EXISTENCE_CHECK =
"phoenix.skip.system.tables.existence.check";
+ /**
+ * Parameter to skip the cluster-wide upgrade block check before system table upgrades
+ */
+ String SKIP_UPGRADE_BLOCK_CHECK = "phoenix.skip.upgrade.block.check";
+
/**
* Config key to represent max region locations to be displayed as part of
the Explain plan
* output.
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BlockedUpgradeIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BlockedUpgradeIT.java
new file mode 100644
index 0000000000..9714e03770
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BlockedUpgradeIT.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.UpgradeBlockedException;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Map;
+
+import static org.junit.Assert.assertThrows;
+
+/**
+ * This is not a standard IT.
+ * It is a starting point for writing ITs that load specific tables from a
snapshot.
+ * Tests based on this IT are meant for debugging specific problems where
HBase table snapshots are
+ * available for replication, and are not meant to be part of the standard
test suite
+ * (or even being committed to the ASF branches)
+ */
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class BlockedUpgradeIT extends LoadSystemTableSnapshotBase {
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ boolean blockUpgrade = true;
+ setupCluster(blockUpgrade);
+ }
+
+ @Test
+ public void testPhoenixUpgradeBlocked() throws Exception {
+
+ Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+ serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+ serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+ clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+
+ // Start
+ assertThrows(UpgradeBlockedException.class, () -> setUpTestDriver(new
ReadOnlyProps(serverProps.entrySet().iterator()), new
ReadOnlyProps(clientProps.entrySet()
+ .iterator())));
+ }
+}
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotBase.java
similarity index 64%
rename from
phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotIT.java
rename to
phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotBase.java
index 603e5bd7b2..003ed5ebc0 100644
---
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotIT.java
+++
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LoadSystemTableSnapshotBase.java
@@ -17,45 +17,48 @@
*/
package org.apache.phoenix.end2end;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-
-import java.io.File;
-import java.io.IOException;
-
-import java.net.URL;
-
-import java.util.HashMap;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.utils.IOUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.CheckAndMutate;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
-
+import org.apache.phoenix.exception.UpgradeBlockedException;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ClientUtil;
import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import org.apache.phoenix.util.SchemaUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-
+import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
+import static org.junit.Assert.*;
/**
* This is not a standard IT.
@@ -66,17 +69,17 @@ import java.io.FileOutputStream;
*/
//TODO:- Snapshot here is storing integers as TTL Value and Phoenix Level TTL
is Long, need to work on this.
-@Category(NeedsOwnMiniClusterTest.class)
-public class LoadSystemTableSnapshotIT extends BaseTest {
+public abstract class LoadSystemTableSnapshotBase extends BaseTest {
private static final Logger LOGGER = LoggerFactory.getLogger(
- LoadSystemTableSnapshotIT.class);
+ LoadSystemTableSnapshotBase.class);
public static final String SNAPSHOT_DIR = "snapshots4_7/";
public static String rootDir;
private static final HashMap<String, String> SNAPSHOTS_TO_LOAD;
-// private static final HashMap<String, String> SNAPSHOTS_TO_RESTORE;
+
+ public static final byte[] MUTEX_LOCKED =
"MUTEX_LOCKED".getBytes(StandardCharsets.UTF_8);
static {
SNAPSHOTS_TO_LOAD = new HashMap<>();
@@ -105,8 +108,7 @@ public class LoadSystemTableSnapshotIT extends BaseTest {
}
}
- @BeforeClass
- public static synchronized void doSetup() throws Exception {
+ public static synchronized void setupCluster(boolean
createBlockUpgradeMutex) throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
@@ -116,11 +118,10 @@ public class LoadSystemTableSnapshotIT extends BaseTest {
//Start minicluster without Phoenix first
checkClusterInitialized(new
ReadOnlyProps(serverProps.entrySet().iterator()));
- URL folderUrl = LoadSystemTableSnapshotIT.class.getClassLoader()
+ URL folderUrl = LoadSystemTableSnapshotBase.class.getClassLoader()
.getResource(SNAPSHOT_DIR);
// extract the tar
-
File archive = new File(folderUrl.getFile() + "snapshots47.tar.gz");
File destination = new File(folderUrl.getFile());
@@ -133,6 +134,49 @@ public class LoadSystemTableSnapshotIT extends BaseTest {
String snapshotLoc = new
File(folderUrl.getFile()).getAbsolutePath() + "/" + snapshot.getKey();
importSnapshot(snapshot.getKey(), snapshot.getValue(),
snapshotLoc);
}
+
+ if (createBlockUpgradeMutex) {
+ try {
+
+ Admin admin = utility.getAdmin();
+ TableName mutexTableName = null;
+ try {
+ mutexTableName = SchemaUtil.getPhysicalTableName(
+ SYSTEM_MUTEX_NAME, new
ReadOnlyProps(serverProps.entrySet().iterator()));
+
+ TableDescriptor tableDesc =
TableDescriptorBuilder.newBuilder(mutexTableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder
+
.newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES)
+ .setTimeToLive(TTL_FOR_MUTEX).build())
+ .build();
+ admin.createTable(tableDesc);
+ }
+ catch (IOException e) {
+ throw e;
+ }
+
+ org.apache.hadoop.hbase.client.Connection hbaseConn =
ConnectionFactory.createConnection(getUtility().getConfiguration());
+ Table sysMutexTable = hbaseConn.getTable(mutexTableName);
+
+ final byte[] rowKey = Bytes.toBytes("BLOCK_UPGRADE");
+
+ byte[] family =
PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+ byte[] qualifier =
PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES;
+ Put put = new Put(rowKey);
+ put.addColumn(family, qualifier, MUTEX_LOCKED);
+ CheckAndMutate checkAndMutate =
CheckAndMutate.newBuilder(rowKey)
+ .ifNotExists(family, qualifier)
+ .build(put);
+
+ boolean checkAndPut =
sysMutexTable.checkAndMutate(checkAndMutate).isSuccess();
+
+ if (!checkAndPut) {
+ throw new UpgradeBlockedException();
+ }
+ } catch (IOException e) {
+ throw ClientUtil.parseServerException(e);
+ }
+ }
}
private static void importSnapshot(String key, String value, String loc)
throws IOException {
@@ -161,18 +205,4 @@ public class LoadSystemTableSnapshotIT extends BaseTest {
utility.getAdmin().restoreSnapshot(key);
}
- @Test
- public void testPhoenixUpgrade() throws Exception {
- Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
- serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
- serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
- Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
- clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
-
- //Now we can start Phoenix
- setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet()
- .iterator()));
- assertTrue(true);
- }
-
}
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipBlockUpgradeCheckIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipBlockUpgradeCheckIT.java
new file mode 100644
index 0000000000..d19f810745
--- /dev/null
+++
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipBlockUpgradeCheckIT.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Map;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This is not a standard IT.
+ * It is a starting point for writing ITs that load specific tables from a
snapshot.
+ * Tests based on this IT are meant for debugging specific problems where
HBase table snapshots are
+ * available for replication, and are not meant to be part of the standard
test suite
+ * (or even being committed to the ASF branches)
+ */
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class SkipBlockUpgradeCheckIT extends LoadSystemTableSnapshotBase {
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ boolean blockUpgrade = true;
+ setupCluster(blockUpgrade);
+ }
+
+ @Test
+ public void testPhoenixUpgradeBlockUpgradeCheckSkipped() throws Exception {
+
+ Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+ serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+ serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+ clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+
+ clientProps.put(QueryServices.SKIP_UPGRADE_BLOCK_CHECK, "True");
+
+ //Now we can start Phoenix and skip the upgrade block check
+ setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet()
+ .iterator()));
+ assertTrue(true);
+ }
+}
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeFromSnapshotIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeFromSnapshotIT.java
new file mode 100644
index 0000000000..348191797e
--- /dev/null
+++
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeFromSnapshotIT.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.TableName;
+
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * This is not a standard IT.
+ * It is a starting point for writing ITs that load specific tables from a
snapshot.
+ * Tests based on this IT are meant for debugging specific problems where
HBase table snapshots are
+ * available for replication, and are not meant to be part of the standard
test suite
+ * (or even being committed to the ASF branches)
+ */
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class UpgradeFromSnapshotIT extends LoadSystemTableSnapshotBase {
+ static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new
HashSet<>(Arrays.asList(
+ "SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS",
"SYSTEM:FUNCTION", "SYSTEM:MUTEX", "SYSTEM:CHILD_LINK","SYSTEM:TRANSFORM"));
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ boolean blockUpgrade = false;
+ setupCluster(blockUpgrade);
+ }
+
+ @Test
+ public void testPhoenixUpgrade() throws Exception {
+ Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+ serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+ serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+ clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+
+ //Now we can start Phoenix
+ setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet()
+ .iterator()));
+ assertTrue(true);
+
+ // Check the System Tables after upgrade
+ Set<String> tables = new HashSet<>();
+ for (TableName tn : utility.getAdmin().listTableNames()) {
+ tables.add(tn.getNameAsString());
+ }
+ assertTrue("HBase tables do not include expected Phoenix tables: " +
tables,
+ tables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+
+ }
+}