http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 4c87b57..d471eda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
@@ -43,8 +42,9 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.mob.MobConstants;
@@ -262,11 +262,12 @@ public class DeleteTableProcedure
   protected static void deleteFromFs(final MasterProcedureEnv env,
       final TableName tableName, final List<HRegionInfo> regions,
       final boolean archive) throws IOException {
-    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-    final FileSystem fs = mfs.getFileSystem();
-    final Path tempdir = mfs.getTempDir();
+    final MasterStorage ms = env.getMasterServices().getMasterStorage();
+    final FileSystem fs = ms.getFileSystem();
+    final Path tempdir = ((LegacyPathIdentifier)ms.getTempContainer()).path;
 
-    final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+    final Path tableDir = FSUtils.getTableDir(((LegacyPathIdentifier)ms.getRootContainer()).path,
+        tableName);
     final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
 
     if (fs.exists(tableDir)) {
@@ -290,7 +291,8 @@ public class DeleteTableProcedure
       if (files != null && files.length > 0) {
         for (int i = 0; i < files.length; ++i) {
           if (!files[i].isDir()) continue;
-          HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
+          HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
+              tempTableDir, files[i].getPath());
         }
       }
       fs.delete(tempdir, true);
@@ -303,19 +305,20 @@ public class DeleteTableProcedure
     if (archive) {
       for (HRegionInfo hri : regions) {
         LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
-        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+        HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
           tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
       }
       LOG.debug("Table '" + tableName + "' archived!");
     }
 
     // Archive mob data
-    Path mobTableDir = FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME),
-      tableName);
+    Path mobTableDir = FSUtils.getTableDir(new Path(((LegacyPathIdentifier) ms.getRootContainer())
+        .path, MobConstants.MOB_DIR_NAME), tableName);
     Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
     if (fs.exists(regionDir)) {
-      HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
+      HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
+        mobTableDir, regionDir);
     }
 
     // Delete table directory from FS (temp directory)
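The cast-and-unwrap expression ((LegacyPathIdentifier) ms.getRootContainer()).path recurs in nearly every hunk of this patch wherever a raw Path is still needed. A minimal sketch of a helper that could factor the pattern out; the StoragePaths class is hypothetical and not part of this commit, while MasterStorage, StorageIdentifier, LegacyPathIdentifier, getRootContainer(), and getTempContainer() are the types and methods the patch itself uses:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.apache.hadoop.hbase.fs.StorageIdentifier;
    import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;

    public final class StoragePaths {
      private StoragePaths() {}

      // Unwrap a StorageIdentifier to its backing Path; only the legacy,
      // path-based storage implementation carries a Path, so anything else
      // is an error for callers that still need filesystem paths.
      public static Path toPath(StorageIdentifier id) {
        if (id instanceof LegacyPathIdentifier) {
          return ((LegacyPathIdentifier) id).path;
        }
        throw new IllegalArgumentException("not a path-backed identifier: " + id);
      }

      // Rough equivalents of the old MasterFileSystem.getRootDir()/getTempDir().
      public static Path rootDir(MasterStorage<? extends StorageIdentifier> ms) {
        return toPath(ms.getRootContainer());
      }

      public static Path tempDir(MasterStorage<? extends StorageIdentifier> ms) {
        return toPath(ms.getTempContainer());
      }
    }

Under that assumption, a caller like deleteFromFs above would read FSUtils.getTableDir(StoragePaths.rootDir(ms), tableName).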
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index d37159b..dbcb7c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -31,12 +31,10 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -62,7 +60,7 @@ public final class MasterDDLOperationHelper {
       List<HRegionInfo> regionInfoList, final byte[] familyName, boolean hasMob)
       throws IOException {
-    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+    final MasterStorage ms = env.getMasterServices().getMasterStorage();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
     }
@@ -71,7 +69,7 @@ public final class MasterDDLOperationHelper {
     }
     for (HRegionInfo hri : regionInfoList) {
       // Delete the family directory in FS for all the regions one by one
-      mfs.deleteFamilyFromFS(hri, familyName, hasMob);
+      ms.deleteFamilyFromStorage(hri, familyName, hasMob);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index feb9228..9c373bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -333,12 +334,12 @@ public class RestoreSnapshotProcedure
     if (!getTableName().isSystemTable()) {
       // Table already exist. Check and update the region quota for this table namespace.
-      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+      final MasterStorage ms = env.getMasterServices().getMasterStorage();
       SnapshotManifest manifest = SnapshotManifest.open(
         env.getMasterConfiguration(),
-        mfs.getFileSystem(),
-        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
-        snapshot);
+        ms.getFileSystem(),
+        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, ((LegacyPathIdentifier) ms
+          .getRootContainer()).path), snapshot);
       int snapshotRegionCount = manifest.getRegionManifestsMap().size();
       int tableRegionCount =
           ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
@@ -365,9 +366,9 @@
    * @throws IOException
    **/
   private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
-    MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
+    MasterStorage fileSystemManager = env.getMasterServices().getMasterStorage();
     FileSystem fs = fileSystemManager.getFileSystem();
-    Path rootDir = fileSystemManager.getRootDir();
+    Path rootDir = ((LegacyPathIdentifier) fileSystemManager.getRootContainer()).path;
 
     final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
     LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index 40f5845..6e14f47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -90,7 +90,7 @@ public final class MasterSnapshotVerifier {
    * @param rootDir root directory of the hbase installation.
   */
  public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot,
      Path rootDir) {
-    this.fs = services.getMasterFileSystem().getFileSystem();
+    this.fs = services.getMasterStorage().getFileSystem();
    this.services = services;
    this.snapshot = snapshot;
    this.rootDir = rootDir;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 3a5b974..a2c0ea0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -27,10 +27,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -50,8 +47,9 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsMaster;
@@ -88,7 +86,6 @@ import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
-import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -188,8 +185,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
       throws IOException, UnsupportedOperationException {
     this.master = master;
 
-    this.rootDir = master.getMasterFileSystem().getRootDir();
-    checkSnapshotSupport(master.getConfiguration(), master.getMasterFileSystem());
+    this.rootDir = ((LegacyPathIdentifier) master.getMasterStorage().getRootContainer()).path;
+    checkSnapshotSupport(master.getConfiguration(), master.getMasterStorage());
 
     this.coordinator = coordinator;
     this.executorService = pool;
@@ -214,7 +211,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   private List<SnapshotDescription> getCompletedSnapshots(Path snapshotDir) throws IOException {
     List<SnapshotDescription> snapshotDescs = new ArrayList<SnapshotDescription>();
     // first create the snapshot root path and check to see if it exists
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
     if (snapshotDir == null) snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
 
     // if there are no snapshots, return an empty list
@@ -274,8 +271,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   void resetTempDir() throws IOException {
     // cleanup any existing snapshots.
     Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
-    if (master.getMasterFileSystem().getFileSystem().exists(tmpdir)) {
-      if (!master.getMasterFileSystem().getFileSystem().delete(tmpdir, true)) {
+    if (master.getMasterStorage().getFileSystem().exists(tmpdir)) {
+      if (!master.getMasterStorage().getFileSystem().delete(tmpdir, true)) {
         LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
       }
     }
@@ -295,7 +292,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     String snapshotName = snapshot.getName();
     // first create the snapshot description and check to see if it exists
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
     // Get snapshot info from file system. The one passed as parameter is a "fake" snapshotInfo with
     // just the "name" and it does not contains the "real" snapshot information
@@ -428,7 +425,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
       throws HBaseSnapshotException {
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
     Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
@@ -524,7 +521,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     // cleanup the working directory by trying to delete it from the fs.
     Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
     try {
-      if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
+      if (!this.master.getMasterStorage().getFileSystem().delete(workingDir, true)) {
        LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" + ClientSnapshotDescriptionUtils.toString(snapshot));
      }
@@ -662,7 +659,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   private boolean isSnapshotCompleted(SnapshotDescription snapshot) throws IOException {
     try {
       final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-      FileSystem fs = master.getMasterFileSystem().getFileSystem();
+      FileSystem fs = master.getMasterStorage().getFileSystem();
       // check to see if the snapshot already exists
       return fs.exists(snapshotDir);
     } catch (IllegalArgumentException iae) {
@@ -765,7 +762,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
       SnapshotDescription reqSnapshot, final long nonceGroup, final long nonce) throws IOException {
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
 
     // check if the snapshot exists
@@ -1045,12 +1042,12 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * starting the master if there're snapshots present but the cleaners needed are missing.
    * Otherwise we can end up with snapshot data loss.
    * @param conf The {@link Configuration} object to use
-   * @param mfs The MasterFileSystem to use
+   * @param ms The MasterStorage to use
    * @throws IOException in case of file-system operation failure
    * @throws UnsupportedOperationException in case cleaners are missing and
    *         there're snapshot in the system
    */
-  private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
+  private void checkSnapshotSupport(final Configuration conf, final MasterStorage ms)
       throws IOException, UnsupportedOperationException {
     // Verify if snapshot is disabled by the user
     String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
@@ -1067,8 +1064,9 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     if (cleaners != null) Collections.addAll(logCleaners, cleaners);
 
     // check if an older version of snapshot directory was present
-    Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
-    FileSystem fs = mfs.getFileSystem();
+    Path oldSnapshotDir = new Path(((LegacyPathIdentifier) ms.getRootContainer()).path, HConstants
+        .OLD_SNAPSHOT_DIR_NAME);
+    FileSystem fs = ms.getFileSystem();
     List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
     if (ss != null && !ss.isEmpty()) {
       LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
@@ -1109,7 +1107,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
       // otherwise we end up with snapshot data loss.
       if (!snapshotEnabled) {
         LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
-        Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
+        Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(((LegacyPathIdentifier) ms
+            .getRootContainer()).path);
         if (fs.exists(snapshotDir)) {
           FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
             new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
@@ -1126,8 +1125,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
       IOException, UnsupportedOperationException {
     this.master = master;
 
-    this.rootDir = master.getMasterFileSystem().getRootDir();
-    checkSnapshotSupport(master.getConfiguration(), master.getMasterFileSystem());
+    this.rootDir = ((LegacyPathIdentifier) master.getMasterStorage().getRootContainer()).path;
+    checkSnapshotSupport(master.getConfiguration(), master.getMasterStorage());
 
     // get the configuration for the coordinator
     Configuration conf = master.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index a0e5b93..503f346 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
@@ -107,8 +108,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
     this.snapshotManager = snapshotManager;
     this.snapshotTable = TableName.valueOf(snapshot.getTable());
     this.conf = this.master.getConfiguration();
-    this.fs = this.master.getMasterFileSystem().getFileSystem();
-    this.rootDir = this.master.getMasterFileSystem().getRootDir();
+    this.fs = this.master.getMasterStorage().getFileSystem();
+    this.rootDir = ((LegacyPathIdentifier) this.master.getMasterStorage().getRootContainer()).path;
     this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
     this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
     this.monitor = new ForeignExceptionDispatcher(snapshot.getName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index ebbe5df..7f03435 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -31,15 +31,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -63,7 +62,7 @@ import org.apache.hadoop.hbase.fs.MasterFileSystem;
 public class FSTableDescriptors implements TableDescriptors {
   private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
 
-  private final MasterFileSystem mfs;
+  private final MasterStorage<? extends StorageIdentifier> ms;
   private final boolean fsreadonly;
   private volatile boolean usecache;
@@ -103,17 +102,18 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   public FSTableDescriptors(final Configuration conf, final FileSystem fs,
       final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
-    this(MasterFileSystem.open(conf, fs, rootdir, false), fsreadonly, usecache);
+    this(MasterStorage.open(conf, rootdir, false), fsreadonly, usecache);
   }
 
-  private FSTableDescriptors(final MasterFileSystem mfs, boolean fsreadonly, boolean usecache)
+  private FSTableDescriptors(final MasterStorage<? extends StorageIdentifier> ms, boolean
+      fsreadonly, boolean usecache)
       throws IOException {
     super();
-    this.mfs = mfs;
+    this.ms = ms;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(mfs.getConfiguration());
+    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(ms.getConfiguration());
   }
 
   @Override
@@ -164,7 +164,7 @@ public class FSTableDescriptors implements TableDescriptors {
     }
     HTableDescriptor tdmt = null;
     try {
-      tdmt = mfs.getTableDescriptor(tablename);
+      tdmt = ms.getTableDescriptor(tablename);
     } catch (NullPointerException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, e);
     } catch (TableInfoMissingException e) {
@@ -198,7 +198,7 @@ public class FSTableDescriptors implements TableDescriptors {
     LOG.debug("Fetching table descriptors from the filesystem.");
     boolean allvisited = true;
-    for (TableName table: mfs.getTables()) {
+    for (TableName table: ms.getTables()) {
       HTableDescriptor htd = null;
       try {
         htd = get(table);
@@ -239,7 +239,7 @@ public class FSTableDescriptors implements TableDescriptors {
   @Override
   public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
-    for (TableName table: mfs.getTables(name)) {
+    for (TableName table: ms.getTables(name)) {
       HTableDescriptor htd = null;
       try {
         htd = get(table);
@@ -284,7 +284,7 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
     }
-    mfs.deleteTable(tablename); // for test only??
+    ms.deleteTable(tablename); // for test only??
     HTableDescriptor descriptor = this.cache.remove(tablename);
     return descriptor;
   }
@@ -299,7 +299,7 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    mfs.updateTableDescriptor(td);
+    ms.updateTableDescriptor(td);
     if (usecache) {
       this.cache.put(td.getTableName(), td);
     }
@@ -328,6 +328,6 @@ public class FSTableDescriptors implements TableDescriptors {
     }
     // forceCreation???
-    return mfs.createTableDescriptor(htd, forceCreation);
+    return ms.createTableDescriptor(htd, forceCreation);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 662f20d..0eacc2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -40,13 +40,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Vector;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
@@ -73,13 +68,12 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage;
-import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -93,7 +87,6 @@ import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Throwables;
 import com.google.common.collect.Iterators;
 import com.google.common.primitives.Ints;
 
@@ -1088,12 +1081,12 @@ public abstract class FSUtils {
     int cfCountTotal = 0;
     int cfFragTotal = 0;
-    MasterFileSystem mfs = master.getMasterFileSystem();
-    for (TableName table: mfs.getTables()) {
+    MasterStorage<? extends StorageIdentifier> ms = master.getMasterStorage();
+    for (TableName table: ms.getTables()) {
       int cfCount = 0;
       int cfFrag = 0;
-      for (HRegionInfo hri: mfs.getRegions(table)) {
-        RegionStorage rfs = mfs.getRegionStorage(hri);
+      for (HRegionInfo hri: ms.getRegions(table)) {
+        RegionStorage rfs = ms.getRegionStorage(hri);
         final Collection<String> families = rfs.getFamilies();
         for (String family: families) {
           cfCount++;
@@ -1782,14 +1775,14 @@ public abstract class FSUtils {
       throws IOException {
     long startTime = EnvironmentEdgeManager.currentTime();
 
-    MasterFileSystem mfs = MasterFileSystem.open(conf, false);
+    MasterStorage<? extends StorageIdentifier> ms = MasterStorage.open(conf, false);
 
     Collection<HRegionInfo> hris;
     if (desiredTable != null) {
-      hris = mfs.getRegions(TableName.valueOf(desiredTable));
+      hris = ms.getRegions(TableName.valueOf(desiredTable));
     } else {
       hris = new ArrayList<HRegionInfo>();
-      for (TableName tableName: mfs.getTables()) {
-        hris.addAll(mfs.getRegions(tableName));
+      for (TableName tableName: ms.getTables()) {
+        hris.addAll(ms.getRegions(tableName));
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 8a14cca..45c9989 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -110,7 +110,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.fs.StorageIdentifier;
@@ -118,7 +118,6 @@ import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.fs.legacy.LegacyRegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -130,7 +129,6 @@ import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.MetaUtils;
 import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
@@ -240,7 +238,7 @@ public class HBaseFsck extends Configured implements Closeable {
   // successful
   private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);
 
-  private final MasterFileSystem mfs;
+  private final MasterStorage ms;
 
   /***********
    * Options
@@ -361,7 +359,7 @@ public class HBaseFsck extends Configured implements Closeable {
     // Disable usage of meta replicas in hbck
     getConf().setBoolean(HConstants.USE_META_REPLICAS, false);
 
-    mfs = MasterFileSystem.open(getConf(), false);
+    ms = MasterStorage.open(getConf(), false);
     errors = getErrorReporter(getConf());
     this.executor = exec;
@@ -1095,14 +1093,14 @@ public class HBaseFsck extends Configured implements Closeable {
   private void offlineReferenceFileRepair() throws IOException {
     clearState();
     LOG.info("Validating mapping using HDFS state");
-    mfs.visitStoreFiles(new StoreFileVisitor() {
+    ms.visitStoreFiles(new StoreFileVisitor() {
       @Override
       public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
          throws IOException {
        if (errors != null) errors.progress();
        if (!storeFile.isReference()) return;
-        FileSystem fs = mfs.getFileSystem();
+        FileSystem fs = ms.getFileSystem();
        Path path = storeFile.getPath();
        Path referredToFile = StoreFileInfo.getReferredToFile(path);
        if (fs.exists(referredToFile)) return; // good, expected

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 4abf350..c3effc1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -26,10 +26,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -158,7 +157,7 @@ public class TestHColumnDescriptorDefaultVersions {
     verifyHColumnDescriptor(expected, hcds, tableName, families);
 
     // Verify descriptor from HDFS
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterStorage();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
     HTableDescriptor td = LegacyTableDescriptor.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
     hcds = td.getColumnFamilies();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index 2dca6b1..baaa14b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -230,7 +230,7 @@ public class TestNamespace {
     TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000);
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
     assertTrue(fs.exists(
-        new Path(master.getMasterFileSystem().getRootDir(),
+        new Path(master.getMasterStorage().getRootDir(),
            new Path(HConstants.BASE_NAMESPACE_DIR,
                new Path(nsName, desc.getTableName().getQualifierAsString())))));
     assertEquals(1, admin.listTables().length);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index f5f380f..9b4206c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -26,7 +26,6 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -34,7 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -304,7 +302,7 @@ public class TestRestoreSnapshotFromClient {
   }
 
   private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterStorage();
     final Set<String> families = new HashSet<String>();
     mfs.visitStoreFiles(tableName, new StoreFileVisitor() {
       @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index 2d4b4c9..565da24 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
@@ -121,8 +120,8 @@ public class TestSnapshotCloneIndependence {
   @Before
   public void setup() throws Exception {
-    fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    fs = UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
+    rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
     admin = UTIL.getHBaseAdmin();
     originalTableName = TableName.valueOf("test" + testName.getMethodName());
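The TestRestoreSnapshotFromClient hunk above shows only the head of getFamiliesFromFS; the visitor body falls outside the diff context. A sketch of how the complete method plausibly reads after this change, with the imports it relies on (the body that collects family names is inferred, not shown in the patch; TEST_UTIL is the test class's HBaseTestingUtility):

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

    private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
      MasterStorage mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterStorage();
      final Set<String> families = new HashSet<String>();
      // Walk every store file of the table; record each family that still owns one.
      mfs.visitStoreFiles(tableName, new StoreFileVisitor() {
        @Override
        public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
            throws IOException {
          families.add(family);
        }
      });
      return families;
    }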
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index fbd347a..702b80a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -221,10 +221,10 @@ public class TestSnapshotFromClient {
     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
     LOG.debug("FS state after snapshot:");
-    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterStorage().logStorageState(LOG);
 
     SnapshotTestingUtils.confirmSnapshotValid(
       ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM,
@@ -291,10 +291,10 @@ public class TestSnapshotFromClient {
     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
     LOG.debug("FS state after snapshot:");
-    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterStorage().logStorageState(LOG);
 
     List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
     List<byte[]> nonEmptyCfs = Lists.newArrayList();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
index 1a774e5..e043290 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
@@ -98,8 +98,8 @@ public class TestSnapshotMetadata {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_RS);
 
-    fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    fs = UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
+    rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index 8b9428f..b357066 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -58,7 +58,7 @@ public class TestTableSnapshotScanner {
   public void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_REGION_SERVERS, true);
-    rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
     fs = rootDir.getFileSystem(UTIL.getConfiguration());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
index 3180c50..8b869a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
@@ -280,7 +280,7 @@ public class TestBlockReorder {
        "/" + targetRs.getServerName().toString()).toUri().getPath();
 
     DistributedFileSystem mdfs = (DistributedFileSystem)
-        hbm.getMaster().getMasterFileSystem().getFileSystem();
+        hbm.getMaster().getMasterStorage().getFileSystem();
 
     int nbTest = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 70c879a..30bc3e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.mapreduce;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
@@ -63,7 +62,7 @@ public abstract class TableSnapshotInputFormatTestBase {
   public void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_REGION_SERVERS, true);
-    rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    rootDir = UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
     fs = rootDir.getFileSystem(UTIL.getConfiguration());
   }
@@ -127,7 +126,7 @@ public abstract class TableSnapshotInputFormatTestBase {
 
     testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir);
 
-    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().visitStoreFiles(tableName,
+    UTIL.getHBaseCluster().getMaster().getMasterStorage().visitStoreFiles(tableName,
      new StoreFileVisitor() {
        @Override
        public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
index c17d408..d109907 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
@@ -108,7 +108,7 @@ public class TestWALPlayer {
     // replay the WAL, map table 1 to table 2
     WAL log = cluster.getRegionServer(0).getWAL(null);
     log.rollWriter();
-    String walInputDir = new Path(cluster.getMaster().getMasterFileSystem()
+    String walInputDir = new Path(cluster.getMaster().getMasterStorage()
        .getRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();
 
     Configuration configuration= TEST_UTIL.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 56a8522..ee232d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -103,7 +103,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public MasterFileSystem getMasterFileSystem() {
+  public MasterFileSystem getMasterStorage() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 4348d2b..fa8a74f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
@@ -90,7 +90,7 @@ public class TestCatalogJanitor {
   */
  class MockMasterServices extends MockNoopMasterServices {
    private final ClusterConnection connection;
-    private final MasterFileSystem mfs;
+    private final MasterStorage mfs;
    private final AssignmentManager asm;
    private final ServerManager sm;
@@ -134,7 +134,7 @@ public class TestCatalogJanitor {
      FSUtils.setRootDir(getConfiguration(), rootdir);
      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
-      this.mfs = new MasterFileSystem(this);
+      this.mfs = new MasterStorage(this);
      this.asm = Mockito.mock(AssignmentManager.class);
      this.sm = Mockito.mock(ServerManager.class);
    }
@@ -145,7 +145,7 @@ public class TestCatalogJanitor {
    }
 
    @Override
-    public MasterFileSystem getMasterFileSystem() {
+    public MasterStorage getMasterStorage() {
      return this.mfs;
    }
@@ -244,7 +244,7 @@ public class TestCatalogJanitor {
    // remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under splitA directory so we don't clear out the parent.
-    Path rootdir = services.getMasterFileSystem().getRootDir();
+    Path rootdir = services.getMasterStorage().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita,
@@ -253,7 +253,7 @@ public class TestCatalogJanitor {
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
-    FileSystem fs = services.getMasterFileSystem().getFileSystem();
+    FileSystem fs = services.getMasterStorage().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    assertFalse(janitor.cleanParent(parent, r));
@@ -580,7 +580,7 @@ public class TestCatalogJanitor {
    // remove the parent.
    Result parentMetaRow = createResult(parent, splita, splitb);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
-    Path rootdir = services.getMasterFileSystem().getRootDir();
+    Path rootdir = services.getMasterStorage().getRootDir();
    // have to set the root directory since we use it in HFileDisposer to figure out to get to the
    // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
    // the single test passes, but when the full suite is run, things get borked).
@@ -663,7 +663,7 @@ public class TestCatalogJanitor {
 
    FileSystem fs = FileSystem.get(htu.getConfiguration());
 
-    Path rootdir = services.getMasterFileSystem().getRootDir();
+    Path rootdir = services.getMasterStorage().getRootDir();
    // have to set the root directory since we use it in HFileDisposer to figure out to get to the
    // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
    // the single test passes, but when the full suite is run, things get borked).
@@ -708,7 +708,7 @@ public class TestCatalogJanitor {
  private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
      throws IOException {
    // get the existing store files
-    FileSystem fs = services.getMasterFileSystem().getFileSystem();
+    FileSystem fs = services.getMasterStorage().getFileSystem();
    fs.mkdirs(storedir);
    // create the store files in the parent
    for (int i = 0; i < count; i++) {
@@ -748,7 +748,7 @@ public class TestCatalogJanitor {
      final HTableDescriptor htd, final HRegionInfo parent,
      final HRegionInfo daughter, final byte [] midkey, final boolean top)
      throws IOException {
-    Path rootdir = services.getMasterFileSystem().getRootDir();
+    Path rootdir = services.getMasterStorage().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName());
@@ -757,7 +757,7 @@ public class TestCatalogJanitor {
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
-    FileSystem fs = services.getMasterFileSystem().getFileSystem();
+    FileSystem fs = services.getMasterStorage().getFileSystem();
    ref.write(fs, p);
    return p;
  }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 002438a..1583f33 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -101,7 +101,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.wal.FSHLogProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -217,7 +216,7 @@ public class TestDistributedLogSplitting {
     // turn off load balancing to prevent regions from moving around otherwise
     // they will consume recovered.edits
     master.balanceSwitch(false);
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
 
     List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
@@ -867,7 +866,7 @@ public class TestDistributedLogSplitting {
     });
 
     int count = 0;
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
     Path rootdir = FSUtils.getRootDir(conf);
     Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf("disableTable"));
     for (HRegionInfo hri : regions) {
@@ -1005,7 +1004,7 @@ public class TestDistributedLogSplitting {
     startCluster(3);
     final int NUM_LOG_LINES = 10000;
     final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
-    FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    FileSystem fs = master.getMasterStorage().getFileSystem();
 
     final List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
     HRegionServer hrs = findRSToKill(false, "table");
@@ -1124,7 +1123,7 @@ public class TestDistributedLogSplitting {
     LOG.info("testDelayedDeleteOnFailure");
     startCluster(1);
     final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
-    final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+    final FileSystem fs = master.getMasterStorage().getFileSystem();
     final Path logDir = new Path(FSUtils.getRootDir(conf), "x");
     fs.mkdirs(logDir);
     ExecutorService executor = null;
@@ -1442,7 +1441,7 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, "table", "family", 10);
     try {
-      FileSystem fs = master.getMasterFileSystem().getFileSystem();
+      FileSystem fs = master.getMasterStorage().getFileSystem();
       Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("table"));
       List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
       long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
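For offline tools, the FSUtils hunks earlier in this patch swap MasterFileSystem.open for MasterStorage.open and then enumerate tables and regions through the new interface. Condensed from those hunks, the traversal pattern looks like this; conf is assumed to be a Configuration pointing at the cluster's root directory, and the calls shown are the ones the patch itself introduces:

    // MasterStorage.open(conf, false) mirrors the call in the FSUtils hunk above;
    // the enumeration walks every region of every table via the new interface.
    MasterStorage<? extends StorageIdentifier> ms = MasterStorage.open(conf, false);
    Collection<HRegionInfo> hris = new ArrayList<HRegionInfo>();
    for (TableName tableName : ms.getTables()) {
      hris.addAll(ms.getRegions(tableName));
    }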
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
index 48e143d..3a692d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
@@ -26,8 +26,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -58,7 +57,7 @@ public class TestMasterFileSystem {
   @Test
   public void testFsUriSetProperly() throws Exception {
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
-    MasterFileSystem fs = master.getMasterFileSystem();
+    MasterStorage fs = master.getMasterStorage();
     Path masterRoot = FSUtils.getRootDir(fs.getConfiguration());
     Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
     // make sure the fs and the found root dir have the same scheme

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index 650171d..6725422 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -102,7 +102,7 @@ public class TestSnapshotFromMaster {
     UTIL.startMiniCluster(NUM_RS);
     fs = UTIL.getDFSCluster().getFileSystem();
     master = UTIL.getMiniHBaseCluster().getMaster();
-    rootDir = master.getMasterFileSystem().getRootDir();
+    rootDir = master.getMasterStorage().getRootDir();
     archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index a6a882d..245aa88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -124,8 +124,8 @@ public class MasterProcedureTestingUtility {
   public static void validateTableCreation(final HMaster master, final TableName tableName,
       final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
     // check filesystem
-    final FileSystem fs = master.getMasterFileSystem().getFileSystem();
-    final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+    final FileSystem fs = master.getMasterStorage().getFileSystem();
+    final Path tableDir = FSUtils.getTableDir(master.getMasterStorage().getRootDir(), tableName);
     assertTrue(fs.exists(tableDir));
     FSUtils.logFileSystemState(fs, tableDir, LOG);
     List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
@@ -167,8 +167,8 @@ public static void validateTableDeletion(
       final HMaster master, final TableName tableName) throws IOException {
     // check filesystem
-    final FileSystem fs = master.getMasterFileSystem().getFileSystem();
-    final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+    final FileSystem fs = master.getMasterStorage().getFileSystem();
+    final Path tableDir = FSUtils.getTableDir(master.getMasterStorage().getRootDir(), tableName);
     assertFalse(fs.exists(tableDir));

     // check meta
@@ -243,7 +243,7 @@
     assertFalse(htd.hasFamily(deletedFamily.getBytes()));

     // verify fs
-    master.getMasterFileSystem().visitStoreFiles(tableName, new StoreFileVisitor() {
+    master.getMasterStorage().visitStoreFiles(tableName, new StoreFileVisitor() {
       @Override
       public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
           throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index e949fc5..0e54151 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -326,6 +326,6 @@
   }

   private Path getRootDir() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    return UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
index 408da81..ac85920 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
@@ -108,7 +108,7 @@
     Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
     Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
     final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(),
-        firstMaster.getMasterFileSystem().getFileSystem(),
+        firstMaster.getMasterStorage().getFileSystem(),
         ((WALProcedureStore)masterStore).getLogDir(),
         new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));

     // Abort Latch for the test store
@@ -188,7 +188,7 @@
     Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
     Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
     final WALProcedureStore procStore2 = new WALProcedureStore(firstMaster.getConfiguration(),
-        firstMaster.getMasterFileSystem().getFileSystem(),
+        firstMaster.getMasterStorage().getFileSystem(),
         ((WALProcedureStore)procStore).getLogDir(),
         new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
@@ -227,4 +227,4 @@
   private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
     return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index c143b9a..78e6e7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -32,12 +32,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -273,7 +272,7 @@
     verifyTableDescriptor(htd, tableName, families);

     // Verify descriptor from HDFS
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterStorage();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
     HTableDescriptor td = LegacyTableDescriptor.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
index 035b17b..817a2d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsMaster;
@@ -54,7 +54,7 @@ public class TestSnapshotManager {
   MetricsMaster metrics = Mockito.mock(MetricsMaster.class);
   ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class);
   ExecutorService pool = Mockito.mock(ExecutorService.class);
-  MasterFileSystem mfs = Mockito.mock(MasterFileSystem.class);
+  MasterStorage mfs = Mockito.mock(MasterStorage.class);
   FileSystem fs;
   {
     try {
@@ -72,7 +72,7 @@
       throws IOException, KeeperException {
     Mockito.reset(services);
     Mockito.when(services.getConfiguration()).thenReturn(conf);
-    Mockito.when(services.getMasterFileSystem()).thenReturn(mfs);
+    Mockito.when(services.getMasterStorage()).thenReturn(mfs);
     Mockito.when(mfs.getFileSystem()).thenReturn(fs);
     Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir());
     return new SnapshotManager(services, metrics, coordinator, pool);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index 49a290c..aa1c7ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -81,7 +81,6 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -189,7 +188,7 @@ public class TestNamespaceAuditor {
   @Test
   public void testValidQuotas() throws Exception {
     boolean exceptionCaught = false;
-    MasterFileSystem mfs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = UTIL.getHBaseCluster().getMaster().getMasterStorage();
     NamespaceDescriptor nspDesc = NamespaceDescriptor.create(prefix + "vq1")
         .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "hihdufh")
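The TestSnapshotManager hunk above also shows what the change costs mock-based tests: nothing beyond re-pointing the stubs. A condensed sketch of that stubbing, assuming only the Mockito calls visible in the diff (the helper method is illustrative, not part of the patch; `services` must itself be a mock, as in the test):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.apache.hadoop.hbase.master.MasterServices;
    import org.mockito.Mockito;

    public class MasterStorageStub {
      // Mirrors TestSnapshotManager: services.getMasterStorage() hands back a
      // mock whose getFileSystem()/getRootDir() answers are canned.
      static MasterStorage stub(MasterServices services, FileSystem fs, Path rootDir) {
        MasterStorage ms = Mockito.mock(MasterStorage.class);
        Mockito.when(services.getMasterStorage()).thenReturn(ms);
        Mockito.when(ms.getFileSystem()).thenReturn(fs);
        Mockito.when(ms.getRootDir()).thenReturn(rootDir);
        return ms;
      }
    }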
http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
index 6c851de..97238cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
@@ -60,7 +60,7 @@ public class TestCompactSplitThread {
     setupConf(TEST_UTIL.getConfiguration());
     TEST_UTIL.startMiniCluster(NUM_RS);
     fs = TEST_UTIL.getDFSCluster().getFileSystem();
-    rootDir = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    rootDir = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterStorage().getRootDir();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
index 140b038..331ef7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
@@ -35,15 +35,12 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.fs.FsContext;
+import org.apache.hadoop.hbase.fs.StorageContext;
 import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.TestTableName;

 import org.junit.After;
@@ -106,8 +103,8 @@
     // get the store file paths
     storeFiles.clear();
-    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().visitStoreFiles(
-        FsContext.DATA, tableName, new StoreFileVisitor() {
+    UTIL.getHBaseCluster().getMaster().getMasterStorage().visitStoreFiles(
+        StorageContext.DATA, tableName, new StoreFileVisitor() {
       public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
           throws IOException {
         storeFiles.add(storeFile.getPath());
@@ -190,11 +187,11 @@
   // Helpers
   // ==========================================================================
   private FileSystem getFileSystem() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    return UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
   }

   private Path getRootDir() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    return UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
   }

   private void evictHFileCache(final Path hfile) throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index ee3d7d6..f824517 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -239,8 +239,8 @@ public class TestRegionMergeTransactionOnCluster {
     PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
     HRegionInfo regionA = p.getFirst();
     HRegionInfo regionB = p.getSecond();
-    FileSystem fs = MASTER.getMasterFileSystem().getFileSystem();
-    Path rootDir = MASTER.getMasterFileSystem().getRootDir();
+    FileSystem fs = MASTER.getMasterStorage().getFileSystem();
+    Path rootDir = MASTER.getMasterStorage().getRootDir();
     Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
     Path regionAdir = new Path(tabledir, regionA.getEncodedName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
index c89860c..508b5dc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
@@ -120,11 +120,11 @@
   // Helpers
   // ==========================================================================
   private FileSystem getFileSystem() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    return UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
   }

   private Path getRootDir() {
-    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    return UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
   }

   public void loadTable(final Table table, int numRows) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index c980ae6..165acd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -31,7 +31,6 @@ import java.io.InterruptedIOException;
 import java.util.Collection;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -39,7 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -57,7 +55,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -93,8 +90,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
@@ -769,7 +764,7 @@
     try {
       // Precondition: we created a table with no data, no store files.
      printOutRegions(regionServer, "Initial regions: ");
-      cluster.getMaster().getMasterFileSystem().logFileSystemState(LOG);
+      cluster.getMaster().getMasterStorage().logStorageState(LOG);
       List<StoreFileInfo> storefiles = getStoreFiles(tableName);
       assertEquals("Expected nothing but found " + storefiles.toString(),
           storefiles.size(), 0);
@@ -793,7 +788,7 @@
       assertTrue(daughters.size() == 2);

       // check dirs
-      cluster.getMaster().getMasterFileSystem().logFileSystemState(LOG);
+      cluster.getMaster().getMasterStorage().logStorageState(LOG);
       List<StoreFileInfo> storefilesAfter = getStoreFiles(tableName);
       assertEquals("Expected nothing but found " + storefilesAfter.toString(),
           storefilesAfter.size(), 0);
@@ -949,7 +944,7 @@
       SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"));
       st.prepare();
       st.stepsBeforePONR(regionServer, regionServer, false);
-      assertEquals(3, cluster.getMaster().getMasterFileSystem().getRegions(desc.getTableName()).size());
+      assertEquals(3, cluster.getMaster().getMasterStorage().getRegions(desc.getTableName()).size());
       cluster.startRegionServer();
       regionServer.kill();
       // Before we check deadServerInProgress, we should ensure server is dead at master side.
@@ -965,7 +960,7 @@
       AssignmentManager am = cluster.getMaster().getAssignmentManager();
       assertEquals(am.getRegionStates().getRegionsInTransition().toString(), 0, am
           .getRegionStates().getRegionsInTransition().size());
-      assertEquals(1, cluster.getMaster().getMasterFileSystem().getRegions(desc.getTableName()).size());
+      assertEquals(1, cluster.getMaster().getMasterStorage().getRegions(desc.getTableName()).size());
     } finally {
       TESTING_UTIL.deleteTable(table);
     }
@@ -1364,7 +1359,7 @@
   private List<StoreFileInfo> getStoreFiles(TableName table) throws IOException {
     final ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>();
-    cluster.getMaster().getMasterFileSystem().visitStoreFiles(table, new StoreFileVisitor() {
+    cluster.getMaster().getMasterStorage().visitStoreFiles(table, new StoreFileVisitor() {
       @Override
       public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
           throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f759626/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index d8ceafb..d824d70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.io.HFileLink;
@@ -162,7 +162,7 @@ public final class SnapshotTestingUtils {
   public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
       HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
       byte[] family) throws IOException {
-    MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = testUtil.getHBaseCluster().getMaster().getMasterStorage();
     confirmSnapshotValid(snapshotDescriptor, tableName, family, mfs.getRootDir(),
       testUtil.getHBaseAdmin(), mfs.getFileSystem());
   }
@@ -424,7 +424,7 @@
    */
  public static ArrayList corruptSnapshot(final HBaseTestingUtility util, final String snapshotName)
      throws IOException {
-    final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
+    final MasterStorage mfs = util.getHBaseCluster().getMaster().getMasterStorage();
     final FileSystem fs = mfs.getFileSystem();

     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
@@ -854,7 +854,7 @@ public static void deleteArchiveDirectory(final HBaseTestingUtility util)
       throws IOException {
     // Ensure the archiver to be empty
-    MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    MasterStorage mfs = util.getMiniHBaseCluster().getMaster().getMasterStorage();
     Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
     mfs.getFileSystem().delete(archiveDir, true);
   }
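Beyond the accessor swap, the richer part of the new interface exercised by these tests is the store-file visitor: TestCorruptedRegionStoreFile and TestSplitTransactionOnCluster both enumerate store files through MasterStorage.visitStoreFiles(...) instead of walking directories with FSUtils. A self-contained sketch of that pattern, using only the types and signatures visible in the diffs (the wrapper class is hypothetical):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
    import org.apache.hadoop.hbase.fs.StorageContext;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

    public class StoreFileListing {
      // Collects every store file path for a table, as the tests above do,
      // via the visitor callback rather than direct FileSystem listings.
      static List<Path> storeFilePaths(MasterStorage ms, TableName table) throws IOException {
        final List<Path> paths = new ArrayList<Path>();
        ms.visitStoreFiles(StorageContext.DATA, table, new StoreFileVisitor() {
          @Override
          public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
              throws IOException {
            paths.add(storeFile.getPath());
          }
        });
        return paths;
      }
    }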