Repository: phoenix
Updated Branches:
  refs/heads/4.8-HBase-1.1 b8da6bcf3 -> f32e843a6
PHOENIX-808 Create snapshot of SYSTEM.CATALOG prior to upgrade and restore on any failure

Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f32e843a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f32e843a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f32e843a

Branch: refs/heads/4.8-HBase-1.1
Commit: f32e843a6a9a4b3f25b010276971cd5cebcffe4b
Parents: b8da6bc
Author: Samarth <[email protected]>
Authored: Wed Aug 31 23:48:15 2016 -0700
Committer: Samarth <[email protected]>
Committed: Wed Aug 31 23:48:15 2016 -0700

----------------------------------------------------------------------
 .../phoenix/coprocessor/MetaDataProtocol.java   |  19 ++
 .../query/ConnectionQueryServicesImpl.java      | 334 +++++++++++++------
 .../org/apache/phoenix/util/UpgradeUtil.java    |  11 +
 3 files changed, 253 insertions(+), 111 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f32e843a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 8982fe7..dce89bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -19,7 +19,9 @@ package org.apache.phoenix.coprocessor;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
@@ -27,6 +29,7 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.PFunctionProtos;
 import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.schema.PColumn;
@@ -83,6 +86,22 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 = MIN_TABLE_TIMESTAMP + 18;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0;
+
+    // ALWAYS update this map whenever rolling out a new release (major, minor or patch release).
+    // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
+    public static final Map<Long, String> TIMESTAMP_VERSION_MAP = new HashMap<>(10);
+    static {
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0, "4.1.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0, "4.2.0");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1, "4.2.1");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, "4.3.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, "4.5.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, "4.6.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, "4.7.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x");
+    }
+    public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER;
+
     // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need
     // a different code for every type of error.
     // ENTITY_ALREADY_EXISTS, ENTITY_NOT_FOUND, NEWER_ENTITY_FOUND, ENTITY_NOT_IN_REGION, CONCURRENT_MODIFICATION


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f32e843a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index e30efa2..ad2346f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -18,15 +18,18 @@ package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
+import static org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_ENABLED;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS;
+import static org.apache.phoenix.util.UpgradeUtil.getUpgradeSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
@@ -82,7 +85,6 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -2290,51 +2292,56 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         }
         checkClosed();
         PhoenixConnection metaConnection = null;
+        boolean success = false;
+        String snapshotName = null;
+        String sysCatalogTableName = null;
         try {
             openConnection();
             String noUpgradeProp = props.getProperty(PhoenixRuntime.NO_UPGRADE_ATTRIB);
-            boolean upgradeSystemTables = !Boolean.TRUE.equals(Boolean.valueOf(noUpgradeProp));
-            Properties scnProps = PropertiesUtil.deepCopy(props);
-            scnProps.setProperty(
-                PhoenixRuntime.CURRENT_SCN_ATTRIB,
-                Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
-            scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
-            String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
-            metaConnection = new PhoenixConnection(
-                ConnectionQueryServicesImpl.this, globalUrl, scnProps, newEmptyMetaData());
-            try (HBaseAdmin admin = getAdmin()) {
-                boolean mappedSystemCatalogExists = admin
-                        .tableExists(SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true));
-                if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
-                        ConnectionQueryServicesImpl.this.getProps())) {
-                    if (admin.tableExists(SYSTEM_CATALOG_NAME_BYTES)) {
-                        //check if the server is already updated and have namespace config properly set.
-                        checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES);
-                    }
-                    ensureSystemTablesUpgraded(ConnectionQueryServicesImpl.this.getProps());
-                } else if (mappedSystemCatalogExists) { throw new SQLExceptionInfo.Builder(
-                        SQLExceptionCode.INCONSISTENET_NAMESPACE_MAPPING_PROPERTIES)
-                        .setMessage("Cannot initiate connection as "
-                                + SchemaUtil.getPhysicalTableName(
-                                        SYSTEM_CATALOG_NAME_BYTES, true)
-                                + " is found but client does not have "
-                                + IS_NAMESPACE_MAPPING_ENABLED + " enabled")
-                        .build().buildException(); }
-            }
-
-            try {
-                metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
-
-            } catch (NewerTableAlreadyExistsException ignore) {
-                // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed timestamp.
-                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
-            } catch (TableAlreadyExistsException e) {
-                if (upgradeSystemTables) {
+            boolean upgradeSystemTables = !Boolean.TRUE.equals(Boolean.valueOf(noUpgradeProp));
+            Properties scnProps = PropertiesUtil.deepCopy(props);
+            scnProps.setProperty(
+                PhoenixRuntime.CURRENT_SCN_ATTRIB,
+                Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+            scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
+            String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
+            metaConnection = new PhoenixConnection(
+                ConnectionQueryServicesImpl.this, globalUrl, scnProps, newEmptyMetaData());
+            try (HBaseAdmin admin = getAdmin()) {
+                boolean mappedSystemCatalogExists = admin
+                        .tableExists(SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true));
+                if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
+                        ConnectionQueryServicesImpl.this.getProps())) {
+                    if (admin.tableExists(SYSTEM_CATALOG_NAME_BYTES)) {
+                        //check if the server is already updated and have namespace config properly set.
+                        checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES);
+                    }
+                    ensureSystemTablesUpgraded(ConnectionQueryServicesImpl.this.getProps());
+                } else if (mappedSystemCatalogExists) { throw new SQLExceptionInfo.Builder(
+                        SQLExceptionCode.INCONSISTENET_NAMESPACE_MAPPING_PROPERTIES)
+                        .setMessage("Cannot initiate connection as "
+                                + SchemaUtil.getPhysicalTableName(
+                                        SYSTEM_CATALOG_NAME_BYTES, true)
+                                + " is found but client does not have "
+                                + IS_NAMESPACE_MAPPING_ENABLED + " enabled")
+                        .build().buildException(); }
+            }
+            try {
+                metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
+            } catch (NewerTableAlreadyExistsException ignore) {
+                // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed timestamp.
+                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
+            } catch (TableAlreadyExistsException e) {
+                if (upgradeSystemTables) {
+                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
+                    sysCatalogTableName = e.getTable().getPhysicalName().getString();
+                    if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) {
+                        snapshotName = getUpgradeSnapshotName(sysCatalogTableName, currentServerSideTableTimeStamp);
+                        createSnapshot(snapshotName, sysCatalogTableName);
+                    }
+                    String columnsToAdd = "";
                     // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
                     // any new columns we've added.
-                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-
-                    String columnsToAdd = "";
                     if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
                         // We know that we always need to add the STORE_NULLS column for 4.3 release
                         columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName());
@@ -2462,84 +2469,85 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0);
                         clearCache();
                     }
-                }
                 }
+            }
 
-            int nSaltBuckets = ConnectionQueryServicesImpl.this.props.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
-                    QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
-            try {
-                String createSequenceTable = Sequence.getCreateTableStatement(nSaltBuckets);
-                metaConnection.createStatement().executeUpdate(createSequenceTable);
-                nSequenceSaltBuckets = nSaltBuckets;
-            } catch (NewerTableAlreadyExistsException e) {
-                // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
-                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
-                nSequenceSaltBuckets = getSaltBuckets(e);
-            } catch (TableAlreadyExistsException e) {
-                if (upgradeSystemTables) {
-                    // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to include
-                    // any new columns we've added.
-                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
-                        // If the table time stamp is before 4.1.0 then we need to add below columns
-                        // to the SYSTEM.SEQUENCE table.
-                        String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
-                                + ", " + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
-                                + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName()
-                                + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName();
-                        addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
-                    }
-                    // If the table timestamp is before 4.2.1 then run the upgrade script
-                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1) {
-                        if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSaltBuckets, e.getTable())) {
-                            metaConnection.removeTable(null,
-                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA,
-                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE,
-                                    MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
-                            clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY,
-                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES,
-                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES,
-                                    MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
-                            clearTableRegionCache(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
-                        }
-                        nSequenceSaltBuckets = nSaltBuckets;
-                    } else {
-                        nSequenceSaltBuckets = getSaltBuckets(e);
+            int nSaltBuckets = ConnectionQueryServicesImpl.this.props.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
+                    QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
+            try {
+                String createSequenceTable = Sequence.getCreateTableStatement(nSaltBuckets);
+                metaConnection.createStatement().executeUpdate(createSequenceTable);
+                nSequenceSaltBuckets = nSaltBuckets;
+            } catch (NewerTableAlreadyExistsException e) {
+                // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
+                // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
+                nSequenceSaltBuckets = getSaltBuckets(e);
+            } catch (TableAlreadyExistsException e) {
+                if (upgradeSystemTables) {
+                    // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to include
+                    // any new columns we've added.
+                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
+                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
+                        // If the table time stamp is before 4.1.0 then we need to add below columns
+                        // to the SYSTEM.SEQUENCE table.
+                        String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
+                                + ", " + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName()
+                                + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName()
+                                + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName();
+                        addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
+                    }
+                    // If the table timestamp is before 4.2.1 then run the upgrade script
+                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1) {
+                        if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSaltBuckets, e.getTable())) {
+                            metaConnection.removeTable(null,
+                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA,
+                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE,
+                                    MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
+                            clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY,
+                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES,
+                                    PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES,
+                                    MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
+                            clearTableRegionCache(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
                         }
+                        nSequenceSaltBuckets = nSaltBuckets;
+                    } else {
+                        nSequenceSaltBuckets = getSaltBuckets(e);
                     }
                 }
-            try {
-                metaConnection.createStatement().executeUpdate(
-                        QueryConstants.CREATE_STATS_TABLE_METADATA);
-            } catch (NewerTableAlreadyExistsException ignore) {
-            } catch(TableAlreadyExistsException e) {
-                if (upgradeSystemTables) {
-                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
-                        metaConnection = addColumnsIfNotExists(
-                                metaConnection,
-                                PhoenixDatabaseMetaData.SYSTEM_STATS_NAME,
-                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
-                                PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT + " "
-                                        + PLong.INSTANCE.getSqlTypeName());
-                    }
                 }
             }
+            }
+            try {
+                metaConnection.createStatement().executeUpdate(
+                        QueryConstants.CREATE_STATS_TABLE_METADATA);
+            } catch (NewerTableAlreadyExistsException ignore) {
+            } catch(TableAlreadyExistsException e) {
+                if (upgradeSystemTables) {
+                    long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
+                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
+                        metaConnection = addColumnsIfNotExists(
+                                metaConnection,
+                                SYSTEM_STATS_NAME,
+                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
+                                PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT + " "
+                                        + PLong.INSTANCE.getSqlTypeName());
                     }
                 }
+            }
+            try {
+                metaConnection.createStatement().executeUpdate(
+                        QueryConstants.CREATE_FUNCTION_METADATA);
+            } catch (NewerTableAlreadyExistsException e) {
+            } catch (TableAlreadyExistsException e) {
+            }
+            if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
+                    ConnectionQueryServicesImpl.this.getProps())) {
                 try {
-                metaConnection.createStatement().executeUpdate(
-                        QueryConstants.CREATE_FUNCTION_METADATA);
-            } catch (NewerTableAlreadyExistsException e) {
-            } catch (TableAlreadyExistsException e) {
-            }
-            if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
-                    ConnectionQueryServicesImpl.this.getProps())) {
-                try {
-                    metaConnection.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS "
-                            + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA);
-                } catch (NewerSchemaAlreadyExistsException e) {}
-            }
-            scheduleRenewLeaseTasks();
+                    metaConnection.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS "
+                            + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA);
+                } catch (NewerSchemaAlreadyExistsException e) {}
+            }
+            success = true;
+            scheduleRenewLeaseTasks();
         } catch (Exception e) {
             if (e instanceof SQLException) {
                 initializationException = (SQLException)e;
@@ -2558,6 +2566,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             }
         } finally {
             try {
+                restoreFromSnapshot(sysCatalogTableName, snapshotName, success);
+            } catch (SQLException e) {
+                if (initializationException != null) {
+                    initializationException.setNextException(e);
+                } else {
+                    initializationException = e;
+                }
+            }
+            try {
                 if (initializationException != null) {
                     throw initializationException;
                 }
@@ -2570,6 +2587,101 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         return null;
     }
 
+    private void createSnapshot(String snapshotName, String tableName)
+            throws SQLException {
+        HBaseAdmin admin = null;
+        SQLException sqlE = null;
+        try {
+            admin = getAdmin();
+            admin.snapshot(snapshotName, tableName);
+            logger.info("Successfully created snapshot " + snapshotName + " for "
+                    + tableName);
+        } catch (Exception e) {
+            sqlE = new SQLException(e);
+        } finally {
+            try {
+                if (admin != null) {
+                    admin.close();
+                }
+            } catch (Exception e) {
+                SQLException adminCloseEx = new SQLException(e);
+                if (sqlE == null) {
+                    sqlE = adminCloseEx;
+                } else {
+                    sqlE.setNextException(adminCloseEx);
+                }
+            } finally {
+                if (sqlE != null) {
+                    throw sqlE;
+                }
+            }
+        }
+    }
+
+    private void restoreFromSnapshot(String tableName, String snapshotName,
+            boolean success) throws SQLException {
+        boolean snapshotRestored = false;
+        boolean tableDisabled = false;
+        if (!success && snapshotName != null) {
+            SQLException sqlE = null;
+            HBaseAdmin admin = null;
+            try {
+                logger.warn("Starting restore of " + tableName + " using snapshot "
+                        + snapshotName + " because upgrade failed");
+                admin = getAdmin();
+                admin.disableTable(tableName);
+                tableDisabled = true;
+                admin.restoreSnapshot(snapshotName);
+                snapshotRestored = true;
+                logger.warn("Successfully restored " + tableName + " using snapshot "
+                        + snapshotName);
+            } catch (Exception e) {
+                sqlE = new SQLException(e);
+            } finally {
+                if (admin != null && tableDisabled) {
+                    try {
+                        admin.enableTable(tableName);
+                        if (snapshotRestored) {
+                            logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+                                    + snapshotName);
+                        } else {
+                            logger.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+                                    + snapshotName + " failed. ");
+                        }
+                    } catch (Exception e1) {
+                        SQLException enableTableEx = new SQLException(e1);
+                        if (sqlE == null) {
+                            sqlE = enableTableEx;
+                        } else {
+                            sqlE.setNextException(enableTableEx);
+                        }
+                        logger.error("Failure in enabling "
+                                + tableName
+                                + (snapshotRestored ? " after successfully restoring using snapshot"
+                                        + snapshotName
+                                : " after restoring using snapshot "
+                                        + snapshotName + " failed. "));
")); + } finally { + try { + admin.close(); + } catch (Exception e2) { + SQLException adminCloseEx = new SQLException(e2); + if (sqlE == null) { + sqlE = adminCloseEx; + } else { + sqlE.setNextException(adminCloseEx); + } + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + } + } + } + } + private void ensureSystemTablesUpgraded(ReadOnlyProps props) throws SQLException, IOException, IllegalArgumentException, InterruptedException { if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) { return; } http://git-wip-us.apache.org/repos/asf/phoenix/blob/f32e843a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java index 7ed9f70..863b082 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java @@ -18,6 +18,8 @@ package org.apache.phoenix.util; import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.phoenix.coprocessor.MetaDataProtocol.CURRENT_CLIENT_VERSION; +import static org.apache.phoenix.coprocessor.MetaDataProtocol.TIMESTAMP_VERSION_MAP; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY; @@ -51,10 +53,13 @@ import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_ import java.io.IOException; import java.sql.Connection; +import java.sql.Date; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.text.Format; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -1888,4 +1893,10 @@ public class UpgradeUtil { } } + public static final String getUpgradeSnapshotName(String tableString, long currentSystemTableTimestamp) { + Format formatter = new SimpleDateFormat("yyMMddHHmmssZ"); + String date = formatter.format(new Date(System.currentTimeMillis())); + String upgradingFrom = TIMESTAMP_VERSION_MAP.get(currentSystemTableTimestamp); + return "SNAPSHOT_" + tableString + "_" + upgradingFrom + "_TO_" + CURRENT_CLIENT_VERSION + "_" + date; + } } \ No newline at end of file
