Repository: hbase
Updated Branches:
  refs/heads/master 20524080b -> c5c395b68
HBASE-12623 removes unused code to upgrade from pre-0.96 to 0.96. Removes both insertion of namespaces and migration of zookeeper data to protobufs.

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5c395b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5c395b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5c395b6

Branch: refs/heads/master
Commit: c5c395b68a1379e3cb85a01e90dcc94c13b1cc6c
Parents: 2052408
Author: Sean Busbey <bus...@apache.org>
Authored: Tue Dec 2 16:57:46 2014 -0600
Committer: Sean Busbey <bus...@apache.org>
Committed: Wed Dec 3 13:13:12 2014 -0600

----------------------------------------------------------------------
 .../hbase/migration/NamespaceUpgrade.java | 575 -------------------
 .../hadoop/hbase/util/ZKDataMigrator.java | 239 +-------
 2 files changed, 5 insertions(+), 809 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5c395b6/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
deleted file mode 100644
index 19bfa8c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ /dev/null
@@ -1,575 +0,0 @@
-/** - * The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.migration; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.security.access.AccessControlLists; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.util.Tool; - -import com.google.common.collect.Lists; -import com.google.common.primitives.Ints; - -/** - * Upgrades old 0.94 filesystem layout to namespace layout - * Does the following: - * - * - creates system namespace directory and move .META. table there - * renaming .META. table to hbase:meta, - * this in turn would require to re-encode the region directory name - * - * <p>The pre-0.96 paths and dir names are hardcoded in here. - */ -public class NamespaceUpgrade implements Tool { - private static final Log LOG = LogFactory.getLog(NamespaceUpgrade.class); - - private Configuration conf; - - private FileSystem fs; - - private Path rootDir; - private Path sysNsDir; - private Path defNsDir; - private Path baseDirs[]; - private Path backupDir; - // First move everything to this tmp .data dir in case there is a table named 'data' - private static final String TMP_DATA_DIR = ".data"; - // Old dir names to migrate. - private static final String DOT_LOGS = ".logs"; - private static final String DOT_OLD_LOGS = ".oldlogs"; - private static final String DOT_CORRUPT = ".corrupt"; - private static final String DOT_SPLITLOG = "splitlog"; - private static final String DOT_ARCHIVE = ".archive"; - - // The old default directory of hbase.dynamic.jars.dir(0.94.12 release). 
- private static final String DOT_LIB_DIR = ".lib"; - - private static final String OLD_ACL = "_acl_"; - /** Directories that are not HBase table directories */ - static final List<String> NON_USER_TABLE_DIRS = Arrays.asList(new String[] { - DOT_LOGS, - DOT_OLD_LOGS, - DOT_CORRUPT, - DOT_SPLITLOG, - HConstants.HBCK_SIDELINEDIR_NAME, - DOT_ARCHIVE, - HConstants.SNAPSHOT_DIR_NAME, - HConstants.HBASE_TEMP_DIRECTORY, - TMP_DATA_DIR, - OLD_ACL, - DOT_LIB_DIR}); - - public NamespaceUpgrade() throws IOException { - super(); - } - - public void init() throws IOException { - this.rootDir = FSUtils.getRootDir(conf); - FSUtils.setFsDefault(getConf(), rootDir); - this.fs = FileSystem.get(conf); - Path tmpDataDir = new Path(rootDir, TMP_DATA_DIR); - sysNsDir = new Path(tmpDataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); - defNsDir = new Path(tmpDataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); - baseDirs = new Path[]{rootDir, - new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY), - new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY)}; - backupDir = new Path(rootDir, HConstants.MIGRATION_NAME); - } - - - public void upgradeTableDirs() throws IOException, DeserializationException { - // if new version is written then upgrade is done - if (verifyNSUpgrade(fs, rootDir)) { - return; - } - - makeNamespaceDirs(); - - migrateTables(); - - migrateSnapshots(); - - migrateDotDirs(); - - migrateMeta(); - - migrateACL(); - - deleteRoot(); - - FSUtils.setVersion(fs, rootDir); - } - - /** - * Remove the -ROOT- dir. No longer of use. - * @throws IOException - */ - public void deleteRoot() throws IOException { - Path rootDir = new Path(this.rootDir, "-ROOT-"); - if (this.fs.exists(rootDir)) { - if (!this.fs.delete(rootDir, true)) LOG.info("Failed remove of " + rootDir); - LOG.info("Deleted " + rootDir); - } - } - - /** - * Rename all the dot dirs -- .data, .archive, etc. -- as data, archive, etc.; i.e. minus the dot. - * @throws IOException - */ - public void migrateDotDirs() throws IOException { - // Dot dirs to rename. Leave the tmp dir named '.tmp' and snapshots as .hbase-snapshot. - final Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); - Path [][] dirs = new Path[][] { - new Path [] {new Path(rootDir, DOT_CORRUPT), new Path(rootDir, HConstants.CORRUPT_DIR_NAME)}, - new Path [] {new Path(rootDir, DOT_LOGS), new Path(rootDir, HConstants.HREGION_LOGDIR_NAME)}, - new Path [] {new Path(rootDir, DOT_OLD_LOGS), - new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME)}, - new Path [] {new Path(rootDir, TMP_DATA_DIR), - new Path(rootDir, HConstants.BASE_NAMESPACE_DIR)}, - new Path[] { new Path(rootDir, DOT_LIB_DIR), - new Path(rootDir, HConstants.LIB_DIR)}}; - for (Path [] dir: dirs) { - Path src = dir[0]; - Path tgt = dir[1]; - if (!this.fs.exists(src)) { - LOG.info("Does not exist: " + src); - continue; - } - rename(src, tgt); - } - // Do the .archive dir. Need to move its subdirs to the default ns dir under data dir... so - // from '.archive/foo', to 'archive/data/default/foo'. - Path oldArchiveDir = new Path(rootDir, DOT_ARCHIVE); - if (this.fs.exists(oldArchiveDir)) { - // This is a pain doing two nn calls but portable over h1 and h2. - mkdirs(archiveDir); - Path archiveDataDir = new Path(archiveDir, HConstants.BASE_NAMESPACE_DIR); - mkdirs(archiveDataDir); - rename(oldArchiveDir, new Path(archiveDataDir, - NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)); - } - // Update the system and user namespace dirs removing the dot in front of .data. 
- Path dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR); - sysNsDir = new Path(dataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); - defNsDir = new Path(dataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); - } - - private void mkdirs(final Path p) throws IOException { - if (!this.fs.mkdirs(p)) throw new IOException("Failed make of " + p); - } - - private void rename(final Path src, final Path tgt) throws IOException { - if (!fs.rename(src, tgt)) { - throw new IOException("Failed move " + src + " to " + tgt); - } - } - - /** - * Create the system and default namespaces dirs - * @throws IOException - */ - public void makeNamespaceDirs() throws IOException { - if (!fs.exists(sysNsDir)) { - if (!fs.mkdirs(sysNsDir)) { - throw new IOException("Failed to create system namespace dir: " + sysNsDir); - } - } - if (!fs.exists(defNsDir)) { - if (!fs.mkdirs(defNsDir)) { - throw new IOException("Failed to create default namespace dir: " + defNsDir); - } - } - } - - /** - * Migrate all tables into respective namespaces, either default or system. We put them into - * a temporary location, '.data', in case a user table is name 'data'. In a later method we will - * move stuff from .data to data. - * @throws IOException - */ - public void migrateTables() throws IOException { - List<String> sysTables = Lists.newArrayList("-ROOT-",".META.", ".META"); - - // Migrate tables including archive and tmp - for (Path baseDir: baseDirs) { - if (!fs.exists(baseDir)) continue; - List<Path> oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir); - for (Path oldTableDir: oldTableDirs) { - if (NON_USER_TABLE_DIRS.contains(oldTableDir.getName())) continue; - if (sysTables.contains(oldTableDir.getName())) continue; - // Make the new directory under the ns to which we will move the table. 
- Path nsDir = new Path(this.defNsDir, - TableName.valueOf(oldTableDir.getName()).getQualifierAsString()); - LOG.info("Moving " + oldTableDir + " to " + nsDir); - if (!fs.exists(nsDir.getParent())) { - if (!fs.mkdirs(nsDir.getParent())) { - throw new IOException("Failed to create namespace dir "+nsDir.getParent()); - } - } - if (sysTables.indexOf(oldTableDir.getName()) < 0) { - LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir); - if (!fs.rename(oldTableDir, nsDir)) { - throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir); - } - } - } - } - } - - public void migrateSnapshots() throws IOException { - //migrate snapshot dir - Path oldSnapshotDir = new Path(rootDir, HConstants.OLD_SNAPSHOT_DIR_NAME); - Path newSnapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME); - if (fs.exists(oldSnapshotDir)) { - boolean foundOldSnapshotDir = false; - // Logic to verify old snapshot dir culled from SnapshotManager - // ignore all the snapshots in progress - FileStatus[] snapshots = fs.listStatus(oldSnapshotDir, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); - // loop through all the completed snapshots - for (FileStatus snapshot : snapshots) { - Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - // if the snapshot is bad - if (fs.exists(info)) { - foundOldSnapshotDir = true; - break; - } - } - if(foundOldSnapshotDir) { - LOG.info("Migrating snapshot dir"); - if (!fs.rename(oldSnapshotDir, newSnapshotDir)) { - throw new IOException("Failed to move old snapshot dir "+ - oldSnapshotDir+" to new "+newSnapshotDir); - } - } - } - } - - public void migrateMeta() throws IOException { - Path newMetaDir = new Path(this.sysNsDir, TableName.META_TABLE_NAME.getQualifierAsString()); - Path newMetaRegionDir = - new Path(newMetaDir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); - Path oldMetaDir = new Path(rootDir, ".META."); - if (fs.exists(oldMetaDir)) { - LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir); - if (!fs.rename(oldMetaDir, newMetaDir)) { - throw new IOException("Failed to migrate meta table " - + oldMetaDir.getName() + " to " + newMetaDir); - } - } else { - // on windows NTFS, meta's name is .META (note the missing dot at the end) - oldMetaDir = new Path(rootDir, ".META"); - if (fs.exists(oldMetaDir)) { - LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir); - if (!fs.rename(oldMetaDir, newMetaDir)) { - throw new IOException("Failed to migrate meta table " - + oldMetaDir.getName() + " to " + newMetaDir); - } - } - } - - // Since meta table name has changed rename meta region dir from it's old encoding to new one - Path oldMetaRegionDir = HRegion.getRegionDir(rootDir, - new Path(newMetaDir, "1028785192").toString()); - if (fs.exists(oldMetaRegionDir)) { - LOG.info("Migrating meta region " + oldMetaRegionDir + " to " + newMetaRegionDir); - if (!fs.rename(oldMetaRegionDir, newMetaRegionDir)) { - throw new IOException("Failed to migrate meta region " - + oldMetaRegionDir + " to " + newMetaRegionDir); - } - } - // Remove .tableinfo files as they refer to ".META.". - // They will be recreated by master on startup. 
- removeTableInfoInPre96Format(TableName.META_TABLE_NAME); - - Path oldRootDir = new Path(rootDir, "-ROOT-"); - if(!fs.rename(oldRootDir, backupDir)) { - throw new IllegalStateException("Failed to old data: "+oldRootDir+" to "+backupDir); - } - } - - /** - * Removes .tableinfo files that are laid in pre-96 format (i.e., the tableinfo files are under - * table directory). - * @param tableName - * @throws IOException - */ - private void removeTableInfoInPre96Format(TableName tableName) throws IOException { - Path tableDir = FSUtils.getTableDir(rootDir, tableName); - FileStatus[] status = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER); - if (status == null) return; - for (FileStatus fStatus : status) { - FSUtils.delete(fs, fStatus.getPath(), false); - } - } - - public void migrateACL() throws IOException { - - TableName oldTableName = TableName.valueOf(OLD_ACL); - Path oldTablePath = new Path(rootDir, oldTableName.getNameAsString()); - - if(!fs.exists(oldTablePath)) { - return; - } - - LOG.info("Migrating ACL table"); - - TableName newTableName = AccessControlLists.ACL_TABLE_NAME; - Path newTablePath = FSUtils.getTableDir(rootDir, newTableName); - HTableDescriptor oldDesc = - readTableDescriptor(fs, getCurrentTableInfoStatus(fs, oldTablePath)); - - if(FSTableDescriptors.getTableInfoPath(fs, newTablePath) == null) { - LOG.info("Creating new tableDesc for ACL"); - HTableDescriptor newDesc = new HTableDescriptor(oldDesc); - newDesc.setName(newTableName); - new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( - newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true); - } - - - ServerName fakeServer = ServerName.valueOf("nsupgrade", 96, 123); - final WALFactory walFactory = new WALFactory(conf, null, fakeServer.toString()); - WAL metawal = walFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); - FSTableDescriptors fst = new FSTableDescriptors(conf); - HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO, - fst.get(TableName.META_TABLE_NAME), metawal, conf); - HRegion region = null; - try { - for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) { - LOG.info("Migrating ACL region "+regionDir.getName()); - HRegionInfo oldRegionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - HRegionInfo newRegionInfo = - new HRegionInfo(newTableName, - oldRegionInfo.getStartKey(), - oldRegionInfo.getEndKey(), - oldRegionInfo.isSplit(), - oldRegionInfo.getRegionId()); - newRegionInfo.setOffline(oldRegionInfo.isOffline()); - region = - new HRegion( - HRegionFileSystem.openRegionFromFileSystem(conf, fs, oldTablePath, - oldRegionInfo, false), - metawal, - conf, - oldDesc, - null); - region.initialize(); - updateAcls(region); - // closing the region would flush it so we don't need an explicit flush to save - // acl changes. 
- region.close(); - - //Create new region dir - Path newRegionDir = new Path(newTablePath, newRegionInfo.getEncodedName()); - if(!fs.exists(newRegionDir)) { - if(!fs.mkdirs(newRegionDir)) { - throw new IllegalStateException("Failed to create new region dir: " + newRegionDir); - } - } - - //create new region info file, delete in case one exists - HRegionFileSystem.openRegionFromFileSystem(conf, fs, newTablePath, newRegionInfo, false); - - //migrate region contents - for(FileStatus file : fs.listStatus(regionDir, new FSUtils.UserTableDirFilter(fs))) { - if(file.getPath().getName().equals(HRegionFileSystem.REGION_INFO_FILE)) - continue; - if(!fs.rename(file.getPath(), newRegionDir)) { - throw new IllegalStateException("Failed to move file "+file.getPath()+" to " + - newRegionDir); - } - } - meta.put(MetaTableAccessor.makePutFromRegionInfo(newRegionInfo)); - meta.delete(MetaTableAccessor.makeDeleteFromRegionInfo(oldRegionInfo)); - } - } finally { - meta.flushcache(); - meta.waitForFlushesAndCompactions(); - meta.close(); - metawal.close(); - if(region != null) { - region.close(); - } - } - if(!fs.rename(oldTablePath, backupDir)) { - throw new IllegalStateException("Failed to old data: "+oldTablePath+" to "+backupDir); - } - } - - /** - * Deletes the old _acl_ entry, and inserts a new one using namespace. - * @param region - * @throws IOException - */ - void updateAcls(HRegion region) throws IOException { - byte[] rowKey = Bytes.toBytes(NamespaceUpgrade.OLD_ACL); - // get the old _acl_ entry, if present. - Get g = new Get(rowKey); - Result r = region.get(g); - if (r != null && r.size() > 0) { - // create a put for new _acl_ entry with rowkey as hbase:acl - Put p = new Put(AccessControlLists.ACL_GLOBAL_NAME); - for (Cell c : r.rawCells()) { - p.addImmutable(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), CellUtil.cloneValue(c)); - } - region.put(p); - // delete the old entry - Delete del = new Delete(rowKey); - region.delete(del); - } - - // delete the old entry for '-ROOT-' - rowKey = Bytes.toBytes(TableName.OLD_ROOT_STR); - Delete del = new Delete(rowKey); - region.delete(del); - - // rename .META. to hbase:meta - rowKey = Bytes.toBytes(TableName.OLD_META_STR); - g = new Get(rowKey); - r = region.get(g); - if (r != null && r.size() > 0) { - // create a put for new .META. 
entry with rowkey as hbase:meta - Put p = new Put(TableName.META_TABLE_NAME.getName()); - for (Cell c : r.rawCells()) { - p.addImmutable(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), CellUtil.cloneValue(c)); - } - region.put(p); - // delete the old entry - del = new Delete(rowKey); - region.delete(del); - } - } - - //Culled from FSTableDescriptors - private static HTableDescriptor readTableDescriptor(FileSystem fs, - FileStatus status) throws IOException { - int len = Ints.checkedCast(status.getLen()); - byte [] content = new byte[len]; - FSDataInputStream fsDataInputStream = fs.open(status.getPath()); - try { - fsDataInputStream.readFully(content); - } finally { - fsDataInputStream.close(); - } - HTableDescriptor htd = null; - try { - htd = HTableDescriptor.parseFrom(content); - } catch (DeserializationException e) { - throw new IOException("content=" + Bytes.toShort(content), e); - } - return htd; - } - - private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() { - @Override - public boolean accept(Path p) { - // Accept any file that starts with TABLEINFO_NAME - return p.getName().startsWith(".tableinfo"); - } - }; - - static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR = - new Comparator<FileStatus>() { - @Override - public int compare(FileStatus left, FileStatus right) { - return right.compareTo(left); - }}; - - // logic culled from FSTableDescriptors - static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir) - throws IOException { - FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); - if (status == null || status.length < 1) return null; - FileStatus mostCurrent = null; - for (FileStatus file : status) { - if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) { - mostCurrent = file; - } - } - return mostCurrent; - } - - public static boolean verifyNSUpgrade(FileSystem fs, Path rootDir) - throws IOException { - try { - return FSUtils.getVersion(fs, rootDir).equals(HConstants.FILE_SYSTEM_VERSION); - } catch (DeserializationException e) { - throw new IOException("Failed to verify namespace upgrade", e); - } - } - - - @Override - public int run(String[] args) throws Exception { - if (args.length < 1 || !args[0].equals("--upgrade")) { - System.out.println("Usage: <CMD> --upgrade"); - return 0; - } - init(); - upgradeTableDirs(); - return 0; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - @Override - public Configuration getConf() { - return conf; - } -} http://git-wip-us.apache.org/repos/asf/hbase/blob/c5c395b6/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 11dc47e..85f8d6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -25,237 +24,27 @@ import java.util.Map; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; 
-import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer; -import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.KeeperException.NoNodeException; /** - * Tool to migrate zookeeper data of older hbase versions(<0.95.0) to PB. + * utlity method to migrate zookeeper data across HBase versions. */ -public class ZKDataMigrator extends Configured implements Tool { +@InterfaceAudience.Private +public class ZKDataMigrator { private static final Log LOG = LogFactory.getLog(ZKDataMigrator.class); - @Override - public int run(String[] as) throws Exception { - Configuration conf = getConf(); - ZooKeeperWatcher zkw = null; - try { - zkw = new ZooKeeperWatcher(getConf(), "Migrate ZK data to PB.", - new ZKDataMigratorAbortable()); - if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) { - LOG.info("No hbase related data available in zookeeper. returning.."); - return 0; - } - List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.baseZNode); - if (children == null) { - LOG.info("No child nodes to mirgrate. returning.."); - return 0; - } - String childPath = null; - for (String child : children) { - childPath = ZKUtil.joinZNode(zkw.baseZNode, child); - if (child.equals(conf.get("zookeeper.znode.rootserver", "root-region-server"))) { - // -ROOT- region no longer present from 0.95.0, so we can remove this - // znode - ZKUtil.deleteNodeRecursively(zkw, childPath); - // TODO delete root table path from file system. - } else if (child.equals(conf.get("zookeeper.znode.rs", "rs"))) { - // Since there is no live region server instance during migration, we - // can remove this znode as well. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.draining.rs", "draining"))) { - // If we want to migrate to 0.95.0 from older versions we need to stop - // the existing cluster. So there wont be any draining servers so we - // can - // remove it. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.master", "master"))) { - // Since there is no live master instance during migration, we can - // remove this znode as well. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.backup.masters", "backup-masters"))) { - // Since there is no live backup master instances during migration, we - // can remove this znode as well. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.state", "shutdown"))) { - // shutdown node is not present from 0.95.0 onwards. Its renamed to - // "running". We can delete it. 
- ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.unassigned", "unassigned"))) { - // Any way during clean cluster startup we will remove all unassigned - // region nodes. we can delete all children nodes as well. This znode - // is - // renamed to "regions-in-transition" from 0.95.0 onwards. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.tableEnableDisable", "table")) - || child.equals(conf.get("zookeeper.znode.masterTableEnableDisable", "table"))) { - checkAndMigrateTableStatesToPB(zkw); - } else if (child.equals(conf.get("zookeeper.znode.masterTableEnableDisable92", - "table92"))) { - // This is replica of table states from tableZnode so we can remove - // this. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.splitlog", "splitlog"))) { - // This znode no longer available from 0.95.0 onwards, we can remove - // it. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.replication", "replication"))) { - checkAndMigrateReplicationNodesToPB(zkw); - } else if (child.equals(conf.get("zookeeper.znode.clusterId", "hbaseid"))) { - // it will be re-created by master. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION)) { - // not needed as it is transient. - ZKUtil.deleteNodeRecursively(zkw, childPath); - } else if (child.equals(conf.get("zookeeper.znode.acl.parent", "acl"))) { - // it will be re-created when hbase:acl is re-opened - ZKUtil.deleteNodeRecursively(zkw, childPath); - } - } - } catch (Exception e) { - LOG.error("Got exception while updating znodes ", e); - throw new IOException(e); - } finally { - if (zkw != null) { - zkw.close(); - } - } - return 0; - } - - private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - List<String> tables = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - if (tables == null) { - LOG.info("No table present to migrate table state to PB. returning.."); - return; - } - for (String table : tables) { - String znode = ZKUtil.joinZNode(zkw.tableZNode, table); - // Delete -ROOT- table state znode since its no longer present in 0.95.0 - // onwards. - if (table.equals("-ROOT-") || table.equals(".META.")) { - ZKUtil.deleteNode(zkw, znode); - continue; - } - byte[] data = ZKUtil.getData(zkw, znode); - if (ProtobufUtil.isPBMagicPrefix(data)) continue; - ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); - builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data))); - data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(zkw, znode, data); - } - } - - private void checkAndMigrateReplicationNodesToPB(ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - String replicationZnodeName = getConf().get("zookeeper.znode.replication", "replication"); - String replicationPath = ZKUtil.joinZNode(zkw.baseZNode, replicationZnodeName); - List<String> replicationZnodes = ZKUtil.listChildrenNoWatch(zkw, replicationPath); - if (replicationZnodes == null) { - LOG.info("No replication related znodes present to migrate. 
returning.."); - return; - } - for (String child : replicationZnodes) { - String znode = ZKUtil.joinZNode(replicationPath, child); - if (child.equals(getConf().get("zookeeper.znode.replication.peers", "peers"))) { - List<String> peers = ZKUtil.listChildrenNoWatch(zkw, znode); - if (peers == null || peers.isEmpty()) { - LOG.info("No peers present to migrate. returning.."); - continue; - } - checkAndMigratePeerZnodesToPB(zkw, znode, peers); - } else if (child.equals(getConf().get("zookeeper.znode.replication.state", "state"))) { - // This is no longer used in >=0.95.x - ZKUtil.deleteNodeRecursively(zkw, znode); - } else if (child.equals(getConf().get("zookeeper.znode.replication.rs", "rs"))) { - List<String> rsList = ZKUtil.listChildrenNoWatch(zkw, znode); - if (rsList == null || rsList.isEmpty()) continue; - for (String rs : rsList) { - checkAndMigrateQueuesToPB(zkw, znode, rs); - } - } - } - } - - private void checkAndMigrateQueuesToPB(ZooKeeperWatcher zkw, String znode, String rs) - throws KeeperException, NoNodeException, InterruptedException { - String rsPath = ZKUtil.joinZNode(znode, rs); - List<String> peers = ZKUtil.listChildrenNoWatch(zkw, rsPath); - if (peers == null || peers.isEmpty()) return; - String peerPath = null; - for (String peer : peers) { - peerPath = ZKUtil.joinZNode(rsPath, peer); - List<String> files = ZKUtil.listChildrenNoWatch(zkw, peerPath); - if (files == null || files.isEmpty()) continue; - String filePath = null; - for (String file : files) { - filePath = ZKUtil.joinZNode(peerPath, file); - byte[] data = ZKUtil.getData(zkw, filePath); - if (data == null || Bytes.equals(data, HConstants.EMPTY_BYTE_ARRAY)) continue; - if (ProtobufUtil.isPBMagicPrefix(data)) continue; - ZKUtil.setData(zkw, filePath, - ZKUtil.positionToByteArray(Long.parseLong(Bytes.toString(data)))); - } - } - } - - private void checkAndMigratePeerZnodesToPB(ZooKeeperWatcher zkw, String znode, - List<String> peers) throws KeeperException, NoNodeException, InterruptedException { - for (String peer : peers) { - String peerZnode = ZKUtil.joinZNode(znode, peer); - byte[] data = ZKUtil.getData(zkw, peerZnode); - if (!ProtobufUtil.isPBMagicPrefix(data)) { - migrateClusterKeyToPB(zkw, peerZnode, data); - } - String peerStatePath = ZKUtil.joinZNode(peerZnode, - getConf().get("zookeeper.znode.replication.peers.state", "peer-state")); - if (ZKUtil.checkExists(zkw, peerStatePath) != -1) { - data = ZKUtil.getData(zkw, peerStatePath); - if (ProtobufUtil.isPBMagicPrefix(data)) continue; - migratePeerStateToPB(zkw, data, peerStatePath); - } - } - } - - private void migrateClusterKeyToPB(ZooKeeperWatcher zkw, String peerZnode, byte[] data) - throws KeeperException, NoNodeException { - ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder() - .setClusterkey(Bytes.toString(data)).build(); - ZKUtil.setData(zkw, peerZnode, ProtobufUtil.prependPBMagic(peer.toByteArray())); - } - - private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data, - String peerStatePath) - throws KeeperException, NoNodeException { - String state = Bytes.toString(data); - if (ZooKeeperProtos.ReplicationState.State.ENABLED.name().equals(state)) { - ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.ENABLED_ZNODE_BYTES); - } else if (ZooKeeperProtos.ReplicationState.State.DISABLED.name().equals(state)) { - ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.DISABLED_ZNODE_BYTES); - } - } - /** * Method for table states migration. 
+ * Used when upgrading from pre-2.0 to 2.0 * Reading state from zk, applying them to internal state * and delete. * Used by master to clean migration from zk based states to @@ -325,22 +114,4 @@ public class ZKDataMigrator extends Configured implements Tool { } } - public static void main(String args[]) throws Exception { - System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args)); - } - - static class ZKDataMigratorAbortable implements Abortable { - private boolean aborted = false; - - @Override - public void abort(String why, Throwable e) { - LOG.error("Got aborted with reason: " + why + ", and error: " + e); - this.aborted = true; - } - - @Override - public boolean isAborted() { - return this.aborted; - } - } }
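
For anyone who still needs the one-shot zookeeper conversion this patch removes, the heart of the deleted checkAndMigrateTableStatesToPB() is small enough to carry in an out-of-tree helper. Below is a minimal, hypothetical sketch condensed from the deleted code above (the class and method names TableStateToPB/migrateTableStates are invented here); it assumes a classpath where the pre-2.0 ZooKeeperWatcher, ZKUtil, ProtobufUtil, Bytes and ZooKeeperProtos.DeprecatedTableState classes are still present, and it leaves out the Tool/Abortable wiring, the -ROOT-/.META. znode cleanup, and the replication-peer migration the full tool also performed.

// Hypothetical helper, condensed from the pre-0.95 table-state migration code deleted above.
import java.util.List;

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class TableStateToPB {
  /** Rewrites plain-text table state znodes (e.g. "ENABLED") as PB-magic-prefixed payloads. */
  static void migrateTableStates(ZooKeeperWatcher zkw)
      throws KeeperException, InterruptedException {
    List<String> tables = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
    if (tables == null) return;
    for (String table : tables) {
      String znode = ZKUtil.joinZNode(zkw.tableZNode, table);
      byte[] data = ZKUtil.getData(zkw, znode);
      // Skip empty znodes and znodes already written in the protobuf format.
      if (data == null || ProtobufUtil.isPBMagicPrefix(data)) continue;
      ZooKeeperProtos.DeprecatedTableState.Builder builder =
          ZooKeeperProtos.DeprecatedTableState.newBuilder();
      // The old payload was just the enum name of the state.
      builder.setState(
          ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
      ZKUtil.setData(zkw, znode, ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
    }
  }
}

The sketch deliberately covers only the table-state payloads, since that is the piece the retained ZKDataMigrator method still reads when the master cleans up zk-based state on startup; the other znodes the removed tool handled (root-region-server, rs, master, shutdown, unassigned, splitlog and similar) are, per the comments in the deleted code, either transient, recreated by the master, or no longer used.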