Repository: hbase
Updated Branches:
  refs/heads/hbase-14439 9f7596269 -> 300bdfd2a


HBASE-16787 Renamed the LegacyMasterFileSystem class to LegacyMasterStorage and
modified CatalogJanitor to use the new storage APIs, plus a few minor changes.
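
For context, the CatalogJanitor hunks below replace direct FileSystem/HFileArchiver
calls with the MasterStorage API. A minimal sketch of the new call pattern follows;
the method names are taken from the diff, while the surrounding variables (services,
daughter, parent, parentDescriptor) and the exact control flow are illustrative only:

    MasterStorage ms = services.getMasterStorage();
    if (ms.regionExists(daughter)) {
      // Reference checks now go through the region's storage abstraction.
      boolean references = ms.getRegionStorage(daughter).hasReferences(parentDescriptor);
      if (!references) {
        // Replaces the old HFileArchiver.archiveRegion(conf, fs, parent) call.
        ms.archiveRegion(parent);
      }
    }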

Signed-off-by: Sean Busbey <bus...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/300bdfd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/300bdfd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/300bdfd2

Branch: refs/heads/hbase-14439
Commit: 300bdfd2aa1f99bf3aff848d0dfec9f628818ef2
Parents: 9f75962
Author: Umesh Agashe <uaga...@cloudera.com>
Authored: Mon Oct 3 13:50:07 2016 -0700
Committer: Sean Busbey <bus...@cloudera.com>
Committed: Fri Oct 14 14:05:02 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/fs/MasterStorage.java   |  22 +-
 .../apache/hadoop/hbase/fs/RegionStorage.java   |   7 +
 .../hbase/fs/legacy/LegacyMasterFileSystem.java | 497 ------------------
 .../hbase/fs/legacy/LegacyMasterStorage.java    | 507 +++++++++++++++++++
 .../hbase/fs/legacy/LegacyRegionStorage.java    |  20 +-
 .../hadoop/hbase/master/CatalogJanitor.java     |  64 +--
 .../hbase/regionserver/CompactionTool.java      |   1 -
 .../hbase/master/MockNoopMasterServices.java    |   5 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |   8 +-
 9 files changed, 574 insertions(+), 557 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
index 7b62dea..7adfe70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.fs.legacy.LegacyMasterFileSystem;
+import org.apache.hadoop.hbase.fs.legacy.LegacyMasterStorage;
 import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -161,6 +161,24 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
     return RegionStorage.open(conf, regionInfo, false);
   }
 
+  /**
+   * Returns true if the region exists on the storage.
+   * @param regionInfo
+   * @return true, if the region exists on the storage
+   * @throws IOException
+   */
+  public boolean regionExists(HRegionInfo regionInfo) throws IOException {
+    RegionStorage regionStorage = getRegionStorage(regionInfo);
+    return regionStorage.exists();
+  }
+
+  /**
+   * Archives the specified region's storage artifacts (files, directories, etc.)
+   * @param regionInfo
+   * @throws IOException
+   */
+  public abstract void archiveRegion(HRegionInfo regionInfo) throws IOException;
+
   // ==========================================================================
   //  PUBLIC Methods - visitors
   // ==========================================================================
@@ -263,7 +281,7 @@ public abstract class MasterStorage<IDENTIFIER extends StorageIdentifier> {
     String storageType = conf.get("hbase.storage.type", "legacy").toLowerCase();
     switch (storageType) {
       case "legacy":
-        return new LegacyMasterFileSystem(conf, fs, new LegacyPathIdentifier(rootDir));
+        return new LegacyMasterStorage(conf, fs, new LegacyPathIdentifier(rootDir));
       default:
         throw new IOException("Invalid filesystem type " + storageType);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
index 990b00b..1e71e71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java
@@ -155,6 +155,13 @@ public abstract class RegionStorage<IDENTIFIER extends StorageIdentifier> {
   public HRegionInfo getRegionInfoForFS() { return hri; }
 
   /**
+   * Returns true if the region exists on the storage.
+   * @return true, if region-related artifacts (dirs, files) are present on the storage
+   * @throws IOException
+   */
+  public abstract boolean exists() throws IOException;
+
+  /**
    * Retrieve a referene to the backing storage associated with a particular family within this region.
    */
   public abstract IDENTIFIER getStoreContainer(final String familyName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
deleted file mode 100644
index caa7ee5..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterFileSystem.java
+++ /dev/null
@@ -1,497 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.fs.legacy;
-
-import java.io.IOException;
-import java.io.FileNotFoundException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.StorageContext;
-import org.apache.hadoop.hbase.fs.MasterStorage;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MetaUtils;
-
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-
-@InterfaceAudience.Private
-public class LegacyMasterFileSystem extends MasterStorage<LegacyPathIdentifier> {
-  private static final Log LOG = LogFactory.getLog(LegacyMasterFileSystem.class);
-
-  private final Path sidelineDir;
-  private final Path snapshotDir;
-  private final Path archiveDataDir;
-  private final Path archiveDir;
-  private final Path tmpDataDir;
-  private final Path dataDir;
-  private final Path tmpDir;
-  private final Path bulkDir;
-
-  /*
-   * In a secure env, the protected sub-directories and files under the HBase 
rootDir
-   * would be restricted. The sub-directory will have '700' except the bulk 
load staging dir,
-   * which will have '711'.  The default '700' can be overwritten by setting 
the property
-   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) 
will have '600'.
-   * The rootDir itself will be created with HDFS default permissions if it 
does not exist.
-   * We will check the rootDir permissions to make sure it has 'x' for all to 
ensure access
-   * to the staging dir. If it does not, we will add it.
-   */
-  // Permissions for the directories under rootDir that need protection
-  private final FsPermission secureRootSubDirPerms;
-  // Permissions for the files under rootDir that need protection
-  private final FsPermission secureRootFilePerms = new FsPermission("600");
-  // Permissions for bulk load staging directory under rootDir
-  private final FsPermission HiddenDirPerms = 
FsPermission.valueOf("-rwx--x--x");
-
-  private final boolean isSecurityEnabled;
-
-  public LegacyMasterFileSystem(Configuration conf, FileSystem fs, 
LegacyPathIdentifier rootDir) {
-    super(conf, fs, rootDir);
-
-    // base directories
-    this.sidelineDir = LegacyLayout.getSidelineDir(rootDir.path);
-    this.snapshotDir = LegacyLayout.getSnapshotDir(rootDir.path);
-    this.archiveDir = LegacyLayout.getArchiveDir(rootDir.path);
-    this.archiveDataDir = LegacyLayout.getDataDir(this.archiveDir);
-    this.dataDir = LegacyLayout.getDataDir(rootDir.path);
-    this.tmpDir = LegacyLayout.getTempDir(rootDir.path);
-    this.tmpDataDir = LegacyLayout.getDataDir(this.tmpDir);
-    this.bulkDir = LegacyLayout.getBulkDir(rootDir.path);
-
-    this.secureRootSubDirPerms = new 
FsPermission(conf.get("hbase.rootdir.perms", "700"));
-    this.isSecurityEnabled = 
"kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - Namespace related
-  // ==========================================================================
-  public void createNamespace(NamespaceDescriptor nsDescriptor) throws 
IOException {
-    getFileSystem().mkdirs(getNamespaceDir(StorageContext.DATA, 
nsDescriptor.getName()));
-  }
-
-  public void deleteNamespace(String namespaceName) throws IOException {
-    FileSystem fs = getFileSystem();
-    Path nsDir = getNamespaceDir(StorageContext.DATA, namespaceName);
-
-    try {
-      for (FileStatus status : fs.listStatus(nsDir)) {
-        if 
(!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
-          throw new IOException("Namespace directory contains table dir: " + 
status.getPath());
-        }
-      }
-      if (!fs.delete(nsDir, true)) {
-        throw new IOException("Failed to remove namespace: " + namespaceName);
-      }
-    } catch (FileNotFoundException e) {
-      // File already deleted, continue
-      LOG.debug("deleteDirectory throws exception: " + e);
-    }
-  }
-
-  public Collection<String> getNamespaces(StorageContext ctx) throws 
IOException {
-    FileStatus[] stats = FSUtils.listStatus(getFileSystem(), 
getNamespaceDir(ctx));
-    if (stats == null) return Collections.emptyList();
-
-    ArrayList<String> namespaces = new ArrayList<String>(stats.length);
-    for (int i = 0; i < stats.length; ++i) {
-      namespaces.add(stats[i].getPath().getName());
-    }
-    return namespaces;
-  }
-
-  // should return or get a NamespaceDescriptor? how is that different from 
HTD?
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table Descriptor related
-  // 
==========================================================================s
-  @Override
-  public boolean createTableDescriptor(StorageContext ctx, HTableDescriptor 
tableDesc, boolean force)
-      throws IOException {
-    return LegacyTableDescriptor.createTableDescriptor(getFileSystem(),
-      getTableDir(ctx, tableDesc.getTableName()), tableDesc, force);
-  }
-
-  @Override
-  public void updateTableDescriptor(StorageContext ctx, HTableDescriptor 
tableDesc) throws IOException {
-    LegacyTableDescriptor.updateTableDescriptor(getFileSystem(),
-        getTableDir(ctx, tableDesc.getTableName()), tableDesc);
-  }
-
-  @Override
-  public HTableDescriptor getTableDescriptor(StorageContext ctx, TableName 
tableName)
-      throws IOException {
-    return LegacyTableDescriptor.getTableDescriptorFromFs(
-        getFileSystem(), getTableDir(ctx, tableName));
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table related
-  // ==========================================================================
-  @Override
-  public void deleteTable(StorageContext ctx, TableName tableName) throws 
IOException {
-    Path tableDir = getTableDir(ctx, tableName);
-    if (!FSUtils.deleteDirectory(getFileSystem(), tableDir)) {
-      throw new IOException("Failed delete of " + tableName);
-    }
-  }
-
-  @Override
-  public Collection<TableName> getTables(StorageContext ctx, String namespace)
-      throws IOException {
-    FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
-        getNamespaceDir(ctx, namespace), new 
FSUtils.UserTableDirFilter(getFileSystem()));
-    if (stats == null) return Collections.emptyList();
-
-    ArrayList<TableName> tables = new ArrayList<TableName>(stats.length);
-    for (int i = 0; i < stats.length; ++i) {
-      tables.add(TableName.valueOf(namespace, stats[i].getPath().getName()));
-    }
-    return tables;
-  }
-
-  // ==========================================================================
-  //  PUBLIC Methods - Table Regions related
-  // ==========================================================================
-  @Override
-  public Collection<HRegionInfo> getRegions(StorageContext ctx, TableName 
tableName)
-      throws IOException {
-    FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
-        getTableDir(ctx, tableName), new 
FSUtils.RegionDirFilter(getFileSystem()));
-    if (stats == null) return Collections.emptyList();
-
-    ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>(stats.length);
-    for (int i = 0; i < stats.length; ++i) {
-      regions.add(loadRegionInfo(stats[i].getPath()));
-    }
-    return regions;
-  }
-
-  protected HRegionInfo loadRegionInfo(Path regionDir) throws IOException {
-    FSDataInputStream in = 
getFileSystem().open(LegacyLayout.getRegionInfoFile(regionDir));
-    try {
-      return HRegionInfo.parseFrom(in);
-    } finally {
-      in.close();
-    }
-  }
-
-  // ==========================================================================
-  //  PROTECTED Methods - Bootstrap
-  // ==========================================================================
-
-  /**
-   * Create initial layout in filesystem.
-   * <ol>
-   * <li>Check if the meta region exists and is readable, if not create it.
-   * Create hbase.version and the hbase:meta directory if not one.
-   * </li>
-   * <li>Create a log archive directory for RS to put archived logs</li>
-   * </ol>
-   * Idempotent.
-   * @throws IOException
-   */
-  @Override
-  protected ClusterId startup() throws IOException {
-    Configuration c = getConfiguration();
-    Path rc = ((LegacyPathIdentifier)getRootContainer()).path;
-    FileSystem fs = getFileSystem();
-
-    // If FS is in safe mode wait till out of it.
-    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 
1000));
-
-    boolean isSecurityEnabled = 
"kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
-    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", 
"700"));
-
-    // Filesystem is good. Go ahead and check for hbase.rootdir.
-    try {
-      if (!fs.exists(rc)) {
-        if (isSecurityEnabled) {
-          fs.mkdirs(rc, rootDirPerms);
-        } else {
-          fs.mkdirs(rc);
-        }
-        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
-        // We used to handle this by checking the current DN count and waiting 
until
-        // it is nonzero. With security, the check for datanode count doesn't 
work --
-        // it is a privileged op. So instead we adopt the strategy of the 
jobtracker
-        // and simply retry file creation during bootstrap indefinitely. As 
soon as
-        // there is one datanode it will succeed. Permission problems should 
have
-        // already been caught by mkdirs above.
-        FSUtils.setVersion(fs, rc, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
-            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-      } else {
-        if (!fs.isDirectory(rc)) {
-          throw new IllegalArgumentException(rc.toString() + " is not a 
directory");
-        }
-        if (isSecurityEnabled && 
!rootDirPerms.equals(fs.getFileStatus(rc).getPermission())) {
-          // check whether the permission match
-          LOG.warn("Found rootdir permissions NOT matching expected 
\"hbase.rootdir.perms\" for "
-              + "rootdir=" + rc.toString() + " permissions=" + 
fs.getFileStatus(rc).getPermission()
-              + " and  \"hbase.rootdir.perms\" configured as "
-              + c.get("hbase.rootdir.perms", "700") + ". Automatically setting 
the permissions. You"
-              + " can change the permissions by setting 
\"hbase.rootdir.perms\" in hbase-site.xml "
-              + "and restarting the master");
-          fs.setPermission(rc, rootDirPerms);
-        }
-        // as above
-        FSUtils.checkVersion(fs, rc, true, 
c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
-            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-      }
-    } catch (DeserializationException de) {
-      LOG.fatal("Please fix invalid configuration for " + 
HConstants.HBASE_DIR, de);
-      IOException ioe = new IOException();
-      ioe.initCause(de);
-      throw ioe;
-    } catch (IllegalArgumentException iae) {
-      LOG.fatal("Please fix invalid configuration for "
-          + HConstants.HBASE_DIR + " " + rc.toString(), iae);
-      throw iae;
-    }
-    // Make sure cluster ID exists
-    if (!FSUtils.checkClusterIdExists(fs, rc, c.getInt(
-        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
-      FSUtils.setClusterId(fs, rc, new ClusterId(), 
c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10
-          * 1000));
-    }
-    return FSUtils.getClusterId(fs, rc);
-  }
-
-  @Override
-  public void logStorageState(Log log) throws IOException {
-    FSUtils.logFileSystemState(getFileSystem(), 
((LegacyPathIdentifier)getRootContainer()).path,
-        LOG);
-  }
-
-  @Override
-  protected void bootstrapMeta() throws IOException {
-    // TODO ask RegionStorage
-    if (!FSUtils.metaRegionExists(getFileSystem(), getRootContainer().path)) {
-      bootstrapMeta(getConfiguration());
-    }
-
-    // Create tableinfo-s for hbase:meta if not already there.
-    // assume, created table descriptor is for enabling table
-    // meta table is a system table, so descriptors are predefined,
-    // we should get them from registry.
-    
createTableDescriptor(HTableDescriptor.metaTableDescriptor(getConfiguration()), 
false);
-  }
-
-  private static void bootstrapMeta(final Configuration c) throws IOException {
-    LOG.info("BOOTSTRAP: creating hbase:meta region");
-    try {
-      // Bootstrapping, make sure blockcache is off.  Else, one will be
-      // created here in bootstrap and it'll need to be cleaned up.  Better to
-      // not make it in first place.  Turn off block caching for bootstrap.
-      // Enable after.
-      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      HTableDescriptor metaDescriptor = 
HTableDescriptor.metaTableDescriptor(c);
-      MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, false);
-      HRegion meta = HRegion.createHRegion(c, metaDescriptor, metaHRI, null);
-      MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, true);
-      meta.close();
-    } catch (IOException e) {
-        e = e instanceof RemoteException ?
-                ((RemoteException)e).unwrapRemoteException() : e;
-      LOG.error("bootstrap", e);
-      throw e;
-    }
-  }
-
-  @Override
-  protected void startupCleanup() throws IOException {
-    final FileSystem fs = getFileSystem();
-    // Check the directories under rootdir.
-    checkTempDir(getTempContainer().path, getConfiguration(), getFileSystem());
-    final String[] protectedSubDirs = new String[] {
-        HConstants.BASE_NAMESPACE_DIR,
-        HConstants.HFILE_ARCHIVE_DIRECTORY,
-        HConstants.HREGION_LOGDIR_NAME,
-        HConstants.HREGION_OLDLOGDIR_NAME,
-        MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR,
-        HConstants.CORRUPT_DIR_NAME,
-        HConstants.HBCK_SIDELINEDIR_NAME,
-        MobConstants.MOB_DIR_NAME
-    };
-    for (String subDir : protectedSubDirs) {
-      checkSubDir(new Path(getRootContainer().path, subDir));
-    }
-
-    checkStagingDir();
-
-    // Handle the last few special files and set the final rootDir permissions
-    // rootDir needs 'x' for all to support bulk load staging dir
-    if (isSecurityEnabled) {
-      fs.setPermission(new Path(getRootContainer().path, 
HConstants.VERSION_FILE_NAME), secureRootFilePerms);
-      fs.setPermission(new Path(getRootContainer().path, 
HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
-    }
-    FsPermission currentRootPerms = 
fs.getFileStatus(getRootContainer().path).getPermission();
-    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
-        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
-        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
-      LOG.warn("rootdir permissions do not contain 'excute' for user, group or 
other. "
-        + "Automatically adding 'excute' permission for all");
-      fs.setPermission(
-        getRootContainer().path,
-        new 
FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE), 
currentRootPerms
-            .getGroupAction().or(FsAction.EXECUTE), 
currentRootPerms.getOtherAction().or(
-          FsAction.EXECUTE)));
-    }
-  }
-
-  /**
-   * Make sure the hbase temp directory exists and is empty.
-   * NOTE that this method is only executed once just after the master becomes 
the active one.
-   */
-  private void checkTempDir(final Path tmpdir, final Configuration c, final 
FileSystem fs)
-      throws IOException {
-    // If the temp directory exists, clear the content (left over, from the 
previous run)
-    if (fs.exists(tmpdir)) {
-      // Archive table in temp, maybe left over from failed deletion,
-      // if not the cleaner will take care of them.
-      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
-        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
-          HFileArchiver.archiveRegion(fs, getRootContainer().path, tabledir, 
regiondir);
-        }
-      }
-      if (!fs.delete(tmpdir, true)) {
-        throw new IOException("Unable to clean the temp directory: " + tmpdir);
-      }
-    }
-
-    // Create the temp directory
-    if (isSecurityEnabled) {
-      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
-        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
-      }
-    } else {
-      if (!fs.mkdirs(tmpdir)) {
-        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
-      }
-    }
-  }
-
-  /**
-   * Make sure the directories under rootDir have good permissions. Create if 
necessary.
-   * @param p
-   * @throws IOException
-   */
-  private void checkSubDir(final Path p) throws IOException {
-    final FileSystem fs = getFileSystem();
-    if (!fs.exists(p)) {
-      if (isSecurityEnabled) {
-        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
-          throw new IOException("HBase directory '" + p + "' creation 
failure.");
-        }
-      } else {
-        if (!fs.mkdirs(p)) {
-          throw new IOException("HBase directory '" + p + "' creation 
failure.");
-        }
-      }
-    }
-    else {
-      if (isSecurityEnabled && 
!secureRootSubDirPerms.equals(fs.getFileStatus(p).getPermission())) {
-        // check whether the permission match
-        LOG.warn("Found HBase directory permissions NOT matching expected 
permissions for "
-            + p.toString() + " permissions=" + 
fs.getFileStatus(p).getPermission()
-            + ", expecting " + secureRootSubDirPerms + ". Automatically 
setting the permissions. "
-            + "You can change the permissions by setting 
\"hbase.rootdir.perms\" in hbase-site.xml "
-            + "and restarting the master");
-        fs.setPermission(p, secureRootSubDirPerms);
-      }
-    }
-  }
-
-  /**
-   * Check permissions for bulk load staging directory. This directory has 
special hidden
-   * permissions. Create it if necessary.
-   * @throws IOException
-   */
-  private void checkStagingDir() throws IOException {
-    final FileSystem fs = getFileSystem();
-    Path p = new Path(getRootContainer().path, 
HConstants.BULKLOAD_STAGING_DIR_NAME);
-    try {
-      if (!fs.exists(p)) {
-        if (!fs.mkdirs(p, HiddenDirPerms)) {
-          throw new IOException("Failed to create staging directory " + 
p.toString());
-        }
-      } else {
-        fs.setPermission(p, HiddenDirPerms);
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to create or set permission on staging directory " + 
p.toString());
-      throw new IOException("Failed to create or set permission on staging 
directory "
-          + p.toString(), e);
-    }
-  }
-
-  // ==========================================================================
-  //  PROTECTED Methods - Path
-  // ==========================================================================
-  protected Path getNamespaceDir(StorageContext ctx) {
-    return getBaseDirFromContext(ctx);
-  }
-
-  protected Path getNamespaceDir(StorageContext ctx, String namespace) {
-    return LegacyLayout.getNamespaceDir(getBaseDirFromContext(ctx), namespace);
-  }
-
-  protected Path getTableDir(StorageContext ctx, TableName table) {
-    return LegacyLayout.getTableDir(getBaseDirFromContext(ctx), table);
-  }
-
-  protected Path getRegionDir(StorageContext ctx, TableName table, HRegionInfo 
hri) {
-    return LegacyLayout.getRegionDir(getTableDir(ctx, table), hri);
-  }
-
-  @Override
-  public LegacyPathIdentifier getTempContainer() {
-    return new LegacyPathIdentifier(tmpDir);
-  }
-
-  protected Path getBaseDirFromContext(StorageContext ctx) {
-    switch (ctx) {
-      case TEMP: return tmpDataDir;
-      case DATA: return dataDir;
-      case ARCHIVE: return archiveDataDir;
-      case SNAPSHOT: return snapshotDir;
-      case SIDELINE: return sidelineDir;
-      default: throw new RuntimeException("Invalid context: " + ctx);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
new file mode 100644
index 0000000..db6304f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
@@ -0,0 +1,507 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs.legacy;
+
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.StorageContext;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MetaUtils;
+
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+
+@InterfaceAudience.Private
+public class LegacyMasterStorage extends MasterStorage<LegacyPathIdentifier> {
+  private static final Log LOG = LogFactory.getLog(LegacyMasterStorage.class);
+
+  private final Path sidelineDir;
+  private final Path snapshotDir;
+  private final Path archiveDataDir;
+  private final Path archiveDir;
+  private final Path tmpDataDir;
+  private final Path dataDir;
+  private final Path tmpDir;
+  private final Path bulkDir;
+
+  /*
+   * In a secure env, the protected sub-directories and files under the HBase 
rootDir
+   * would be restricted. The sub-directory will have '700' except the bulk 
load staging dir,
+   * which will have '711'.  The default '700' can be overwritten by setting 
the property
+   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) 
will have '600'.
+   * The rootDir itself will be created with HDFS default permissions if it 
does not exist.
+   * We will check the rootDir permissions to make sure it has 'x' for all to 
ensure access
+   * to the staging dir. If it does not, we will add it.
+   */
+  // Permissions for the directories under rootDir that need protection
+  private final FsPermission secureRootSubDirPerms;
+  // Permissions for the files under rootDir that need protection
+  private final FsPermission secureRootFilePerms = new FsPermission("600");
+  // Permissions for bulk load staging directory under rootDir
+  private final FsPermission HiddenDirPerms = 
FsPermission.valueOf("-rwx--x--x");
+
+  private final boolean isSecurityEnabled;
+
+  public LegacyMasterStorage(Configuration conf, FileSystem fs, 
LegacyPathIdentifier rootDir) {
+    super(conf, fs, rootDir);
+
+    // base directories
+    this.sidelineDir = LegacyLayout.getSidelineDir(rootDir.path);
+    this.snapshotDir = LegacyLayout.getSnapshotDir(rootDir.path);
+    this.archiveDir = LegacyLayout.getArchiveDir(rootDir.path);
+    this.archiveDataDir = LegacyLayout.getDataDir(this.archiveDir);
+    this.dataDir = LegacyLayout.getDataDir(rootDir.path);
+    this.tmpDir = LegacyLayout.getTempDir(rootDir.path);
+    this.tmpDataDir = LegacyLayout.getDataDir(this.tmpDir);
+    this.bulkDir = LegacyLayout.getBulkDir(rootDir.path);
+
+    this.secureRootSubDirPerms = new 
FsPermission(conf.get("hbase.rootdir.perms", "700"));
+    this.isSecurityEnabled = 
"kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - Namespace related
+  // ==========================================================================
+  public void createNamespace(NamespaceDescriptor nsDescriptor) throws 
IOException {
+    getFileSystem().mkdirs(getNamespaceDir(StorageContext.DATA, 
nsDescriptor.getName()));
+  }
+
+  public void deleteNamespace(String namespaceName) throws IOException {
+    FileSystem fs = getFileSystem();
+    Path nsDir = getNamespaceDir(StorageContext.DATA, namespaceName);
+
+    try {
+      for (FileStatus status : fs.listStatus(nsDir)) {
+        if 
(!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
+          throw new IOException("Namespace directory contains table dir: " + 
status.getPath());
+        }
+      }
+      if (!fs.delete(nsDir, true)) {
+        throw new IOException("Failed to remove namespace: " + namespaceName);
+      }
+    } catch (FileNotFoundException e) {
+      // File already deleted, continue
+      LOG.debug("deleteDirectory throws exception: " + e);
+    }
+  }
+
+  public Collection<String> getNamespaces(StorageContext ctx) throws 
IOException {
+    FileStatus[] stats = FSUtils.listStatus(getFileSystem(), 
getNamespaceDir(ctx));
+    if (stats == null) return Collections.emptyList();
+
+    ArrayList<String> namespaces = new ArrayList<String>(stats.length);
+    for (int i = 0; i < stats.length; ++i) {
+      namespaces.add(stats[i].getPath().getName());
+    }
+    return namespaces;
+  }
+
+  // should return or get a NamespaceDescriptor? how is that different from 
HTD?
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table Descriptor related
+  // 
==========================================================================s
+  @Override
+  public boolean createTableDescriptor(StorageContext ctx, HTableDescriptor 
tableDesc, boolean force)
+      throws IOException {
+    return LegacyTableDescriptor.createTableDescriptor(getFileSystem(),
+      getTableDir(ctx, tableDesc.getTableName()), tableDesc, force);
+  }
+
+  @Override
+  public void updateTableDescriptor(StorageContext ctx, HTableDescriptor 
tableDesc) throws IOException {
+    LegacyTableDescriptor.updateTableDescriptor(getFileSystem(),
+        getTableDir(ctx, tableDesc.getTableName()), tableDesc);
+  }
+
+  @Override
+  public HTableDescriptor getTableDescriptor(StorageContext ctx, TableName 
tableName)
+      throws IOException {
+    return LegacyTableDescriptor.getTableDescriptorFromFs(
+        getFileSystem(), getTableDir(ctx, tableName));
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table related
+  // ==========================================================================
+  @Override
+  public void deleteTable(StorageContext ctx, TableName tableName) throws 
IOException {
+    Path tableDir = getTableDir(ctx, tableName);
+    if (!FSUtils.deleteDirectory(getFileSystem(), tableDir)) {
+      throw new IOException("Failed delete of " + tableName);
+    }
+  }
+
+  @Override
+  public Collection<TableName> getTables(StorageContext ctx, String namespace)
+      throws IOException {
+    FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
+        getNamespaceDir(ctx, namespace), new 
FSUtils.UserTableDirFilter(getFileSystem()));
+    if (stats == null) return Collections.emptyList();
+
+    ArrayList<TableName> tables = new ArrayList<TableName>(stats.length);
+    for (int i = 0; i < stats.length; ++i) {
+      tables.add(TableName.valueOf(namespace, stats[i].getPath().getName()));
+    }
+    return tables;
+  }
+
+  // ==========================================================================
+  //  PUBLIC Methods - Table Regions related
+  // ==========================================================================
+  @Override
+  public Collection<HRegionInfo> getRegions(StorageContext ctx, TableName 
tableName)
+      throws IOException {
+    FileStatus[] stats = FSUtils.listStatus(getFileSystem(),
+        getTableDir(ctx, tableName), new 
FSUtils.RegionDirFilter(getFileSystem()));
+    if (stats == null) return Collections.emptyList();
+
+    ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>(stats.length);
+    for (int i = 0; i < stats.length; ++i) {
+      regions.add(loadRegionInfo(stats[i].getPath()));
+    }
+    return regions;
+  }
+
+  protected HRegionInfo loadRegionInfo(Path regionDir) throws IOException {
+    FSDataInputStream in = 
getFileSystem().open(LegacyLayout.getRegionInfoFile(regionDir));
+    try {
+      return HRegionInfo.parseFrom(in);
+    } finally {
+      in.close();
+    }
+  }
+
+  /**
+   * Archives the specified region's storage artifacts (files, directories etc)
+   * @param regionInfo
+   * @throws IOException
+   */
+  @Override
+  public void archiveRegion(HRegionInfo regionInfo) throws IOException {
+    HFileArchiver.archiveRegion(getConfiguration(), getFileSystem(), regionInfo);
+  }
+
+  // ==========================================================================
+  //  PROTECTED Methods - Bootstrap
+  // ==========================================================================
+
+  /**
+   * Create initial layout in filesystem.
+   * <ol>
+   * <li>Check if the meta region exists and is readable, if not create it.
+   * Create hbase.version and the hbase:meta directory if not one.
+   * </li>
+   * <li>Create a log archive directory for RS to put archived logs</li>
+   * </ol>
+   * Idempotent.
+   * @throws IOException
+   */
+  @Override
+  protected ClusterId startup() throws IOException {
+    Configuration c = getConfiguration();
+    Path rc = ((LegacyPathIdentifier)getRootContainer()).path;
+    FileSystem fs = getFileSystem();
+
+    // If FS is in safe mode wait till out of it.
+    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 
1000));
+
+    boolean isSecurityEnabled = 
"kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
+    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", 
"700"));
+
+    // Filesystem is good. Go ahead and check for hbase.rootdir.
+    try {
+      if (!fs.exists(rc)) {
+        if (isSecurityEnabled) {
+          fs.mkdirs(rc, rootDirPerms);
+        } else {
+          fs.mkdirs(rc);
+        }
+        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
+        // We used to handle this by checking the current DN count and waiting 
until
+        // it is nonzero. With security, the check for datanode count doesn't 
work --
+        // it is a privileged op. So instead we adopt the strategy of the 
jobtracker
+        // and simply retry file creation during bootstrap indefinitely. As 
soon as
+        // there is one datanode it will succeed. Permission problems should 
have
+        // already been caught by mkdirs above.
+        FSUtils.setVersion(fs, rc, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
+            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+      } else {
+        if (!fs.isDirectory(rc)) {
+          throw new IllegalArgumentException(rc.toString() + " is not a 
directory");
+        }
+        if (isSecurityEnabled && 
!rootDirPerms.equals(fs.getFileStatus(rc).getPermission())) {
+          // check whether the permission match
+          LOG.warn("Found rootdir permissions NOT matching expected 
\"hbase.rootdir.perms\" for "
+              + "rootdir=" + rc.toString() + " permissions=" + 
fs.getFileStatus(rc).getPermission()
+              + " and  \"hbase.rootdir.perms\" configured as "
+              + c.get("hbase.rootdir.perms", "700") + ". Automatically setting 
the permissions. You"
+              + " can change the permissions by setting 
\"hbase.rootdir.perms\" in hbase-site.xml "
+              + "and restarting the master");
+          fs.setPermission(rc, rootDirPerms);
+        }
+        // as above
+        FSUtils.checkVersion(fs, rc, true, 
c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
+            10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
+            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
+      }
+    } catch (DeserializationException de) {
+      LOG.fatal("Please fix invalid configuration for " + 
HConstants.HBASE_DIR, de);
+      IOException ioe = new IOException();
+      ioe.initCause(de);
+      throw ioe;
+    } catch (IllegalArgumentException iae) {
+      LOG.fatal("Please fix invalid configuration for "
+          + HConstants.HBASE_DIR + " " + rc.toString(), iae);
+      throw iae;
+    }
+    // Make sure cluster ID exists
+    if (!FSUtils.checkClusterIdExists(fs, rc, c.getInt(
+        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
+      FSUtils.setClusterId(fs, rc, new ClusterId(), 
c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10
+          * 1000));
+    }
+    return FSUtils.getClusterId(fs, rc);
+  }
+
+  @Override
+  public void logStorageState(Log log) throws IOException {
+    FSUtils.logFileSystemState(getFileSystem(), 
((LegacyPathIdentifier)getRootContainer()).path,
+        LOG);
+  }
+
+  @Override
+  protected void bootstrapMeta() throws IOException {
+    // TODO ask RegionStorage
+    if (!FSUtils.metaRegionExists(getFileSystem(), getRootContainer().path)) {
+      bootstrapMeta(getConfiguration());
+    }
+
+    // Create tableinfo-s for hbase:meta if not already there.
+    // assume, created table descriptor is for enabling table
+    // meta table is a system table, so descriptors are predefined,
+    // we should get them from registry.
+    
createTableDescriptor(HTableDescriptor.metaTableDescriptor(getConfiguration()), 
false);
+  }
+
+  private static void bootstrapMeta(final Configuration c) throws IOException {
+    LOG.info("BOOTSTRAP: creating hbase:meta region");
+    try {
+      // Bootstrapping, make sure blockcache is off.  Else, one will be
+      // created here in bootstrap and it'll need to be cleaned up.  Better to
+      // not make it in first place.  Turn off block caching for bootstrap.
+      // Enable after.
+      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
+      HTableDescriptor metaDescriptor = 
HTableDescriptor.metaTableDescriptor(c);
+      MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, false);
+      HRegion meta = HRegion.createHRegion(c, metaDescriptor, metaHRI, null);
+      MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, true);
+      meta.close();
+    } catch (IOException e) {
+        e = e instanceof RemoteException ?
+                ((RemoteException)e).unwrapRemoteException() : e;
+      LOG.error("bootstrap", e);
+      throw e;
+    }
+  }
+
+  @Override
+  protected void startupCleanup() throws IOException {
+    final FileSystem fs = getFileSystem();
+    // Check the directories under rootdir.
+    checkTempDir(getTempContainer().path, getConfiguration(), getFileSystem());
+    final String[] protectedSubDirs = new String[] {
+        HConstants.BASE_NAMESPACE_DIR,
+        HConstants.HFILE_ARCHIVE_DIRECTORY,
+        HConstants.HREGION_LOGDIR_NAME,
+        HConstants.HREGION_OLDLOGDIR_NAME,
+        MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR,
+        HConstants.CORRUPT_DIR_NAME,
+        HConstants.HBCK_SIDELINEDIR_NAME,
+        MobConstants.MOB_DIR_NAME
+    };
+    for (String subDir : protectedSubDirs) {
+      checkSubDir(new Path(getRootContainer().path, subDir));
+    }
+
+    checkStagingDir();
+
+    // Handle the last few special files and set the final rootDir permissions
+    // rootDir needs 'x' for all to support bulk load staging dir
+    if (isSecurityEnabled) {
+      fs.setPermission(new Path(getRootContainer().path, 
HConstants.VERSION_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(getRootContainer().path, 
HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+    }
+    FsPermission currentRootPerms = 
fs.getFileStatus(getRootContainer().path).getPermission();
+    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
+        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
+        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
+      LOG.warn("rootdir permissions do not contain 'excute' for user, group or 
other. "
+        + "Automatically adding 'excute' permission for all");
+      fs.setPermission(
+        getRootContainer().path,
+        new 
FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE), 
currentRootPerms
+            .getGroupAction().or(FsAction.EXECUTE), 
currentRootPerms.getOtherAction().or(
+          FsAction.EXECUTE)));
+    }
+  }
+
+  /**
+   * Make sure the hbase temp directory exists and is empty.
+   * NOTE that this method is only executed once just after the master becomes 
the active one.
+   */
+  private void checkTempDir(final Path tmpdir, final Configuration c, final 
FileSystem fs)
+      throws IOException {
+    // If the temp directory exists, clear the content (left over, from the 
previous run)
+    if (fs.exists(tmpdir)) {
+      // Archive table in temp, maybe left over from failed deletion,
+      // if not the cleaner will take care of them.
+      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
+        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
+          HFileArchiver.archiveRegion(fs, getRootContainer().path, tabledir, 
regiondir);
+        }
+      }
+      if (!fs.delete(tmpdir, true)) {
+        throw new IOException("Unable to clean the temp directory: " + tmpdir);
+      }
+    }
+
+    // Create the temp directory
+    if (isSecurityEnabled) {
+      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
+      }
+    } else {
+      if (!fs.mkdirs(tmpdir)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
+      }
+    }
+  }
+
+  /**
+   * Make sure the directories under rootDir have good permissions. Create if 
necessary.
+   * @param p
+   * @throws IOException
+   */
+  private void checkSubDir(final Path p) throws IOException {
+    final FileSystem fs = getFileSystem();
+    if (!fs.exists(p)) {
+      if (isSecurityEnabled) {
+        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
+          throw new IOException("HBase directory '" + p + "' creation 
failure.");
+        }
+      } else {
+        if (!fs.mkdirs(p)) {
+          throw new IOException("HBase directory '" + p + "' creation 
failure.");
+        }
+      }
+    }
+    else {
+      if (isSecurityEnabled && 
!secureRootSubDirPerms.equals(fs.getFileStatus(p).getPermission())) {
+        // check whether the permission match
+        LOG.warn("Found HBase directory permissions NOT matching expected 
permissions for "
+            + p.toString() + " permissions=" + 
fs.getFileStatus(p).getPermission()
+            + ", expecting " + secureRootSubDirPerms + ". Automatically 
setting the permissions. "
+            + "You can change the permissions by setting 
\"hbase.rootdir.perms\" in hbase-site.xml "
+            + "and restarting the master");
+        fs.setPermission(p, secureRootSubDirPerms);
+      }
+    }
+  }
+
+  /**
+   * Check permissions for bulk load staging directory. This directory has 
special hidden
+   * permissions. Create it if necessary.
+   * @throws IOException
+   */
+  private void checkStagingDir() throws IOException {
+    final FileSystem fs = getFileSystem();
+    Path p = new Path(getRootContainer().path, 
HConstants.BULKLOAD_STAGING_DIR_NAME);
+    try {
+      if (!fs.exists(p)) {
+        if (!fs.mkdirs(p, HiddenDirPerms)) {
+          throw new IOException("Failed to create staging directory " + 
p.toString());
+        }
+      } else {
+        fs.setPermission(p, HiddenDirPerms);
+      }
+    } catch (IOException e) {
+      LOG.error("Failed to create or set permission on staging directory " + 
p.toString());
+      throw new IOException("Failed to create or set permission on staging 
directory "
+          + p.toString(), e);
+    }
+  }
+
+  // ==========================================================================
+  //  PROTECTED Methods - Path
+  // ==========================================================================
+  protected Path getNamespaceDir(StorageContext ctx) {
+    return getBaseDirFromContext(ctx);
+  }
+
+  protected Path getNamespaceDir(StorageContext ctx, String namespace) {
+    return LegacyLayout.getNamespaceDir(getBaseDirFromContext(ctx), namespace);
+  }
+
+  protected Path getTableDir(StorageContext ctx, TableName table) {
+    return LegacyLayout.getTableDir(getBaseDirFromContext(ctx), table);
+  }
+
+  protected Path getRegionDir(StorageContext ctx, TableName table, HRegionInfo 
hri) {
+    return LegacyLayout.getRegionDir(getTableDir(ctx, table), hri);
+  }
+
+  @Override
+  public LegacyPathIdentifier getTempContainer() {
+    return new LegacyPathIdentifier(tmpDir);
+  }
+
+  protected Path getBaseDirFromContext(StorageContext ctx) {
+    switch (ctx) {
+      case TEMP: return tmpDataDir;
+      case DATA: return dataDir;
+      case ARCHIVE: return archiveDataDir;
+      case SNAPSHOT: return snapshotDir;
+      case SIDELINE: return sidelineDir;
+      default: throw new RuntimeException("Invalid context: " + ctx);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyRegionStorage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyRegionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyRegionStorage.java
index 2785950..00f279b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyRegionStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyRegionStorage.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.fs.legacy;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -39,23 +38,18 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ipc.RemoteException;
 
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.fs.FSUtilsWithRetries;
 import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MetaUtils;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -66,9 +60,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.regionserver.*;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
 @InterfaceAudience.Private
@@ -119,6 +111,18 @@ public class LegacyRegionStorage extends RegionStorage<LegacyPathIdentifier> {
       in.close();
     }
   }
+
+  /**
+   * Returns true if the region exists on the storage.
+   * @return true, if region-related artifacts (dirs, files) are present on the storage
+   * @throws IOException
+   */
+  @Override
+  public boolean exists() throws IOException {
+    // TODO: see if more checks are required for .regioninfo file etc
+    return FSUtils.isExists(getFileSystem(), regionDir);
+  }
+
   // ==========================================================================
   //  PUBLIC Methods - Families Related
   // ==========================================================================

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 6f958f0..102c81d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -29,23 +29,17 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.fs.RegionStorage;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
@@ -148,7 +142,7 @@ public class CatalogJanitor extends ScheduledChore {
     final boolean isTableSpecified = (tableName != null);
     // TODO: Only works with single hbase:meta region currently.  Fix.
     final AtomicInteger count = new AtomicInteger(0);
-    // Keep Map of found split parents.  There are candidates for cleanup.
+    // Keep Map of found split parents.  These are candidates for cleanup.
     // Use a comparator that has split parents come before its daughters.
     final Map<HRegionInfo, Result> splitParents =
       new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
@@ -197,26 +191,23 @@ public class CatalogJanitor extends ScheduledChore {
   boolean cleanMergeRegion(final HRegionInfo mergedRegion,
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
     HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
-    RegionStorage regionFs = null;
+    MasterStorage ms = this.services.getMasterStorage();
+
     try {
-      regionFs = RegionStorage.open(this.services.getConfiguration(), mergedRegion, false);
+      if (ms.getRegionStorage(mergedRegion).hasReferences(htd)) return false;
     } catch (IOException e) {
       LOG.warn("Merged region does not exist: " + 
mergedRegion.getEncodedName());
     }
-    if (regionFs == null || !regionFs.hasReferences(htd)) {
+
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
           + regionB.getRegionNameAsString()
-          + " from fs because merged region no longer holds references");
-      // TODO update HFileArchiver to use RegionStorage
-      FileSystem fs = this.services.getMasterStorage().getFileSystem();
-      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
-      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
-      MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), mergedRegion);
-      services.getServerManager().removeRegion(regionA);
-      services.getServerManager().removeRegion(regionB);
-      return true;
+          + " from storage because merged region no longer holds references");
     }
-    return false;
+    ms.archiveRegion(regionA);
+    ms.archiveRegion(regionB);
+    MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), mergedRegion);
+    return true;
   }
 
   /**
@@ -352,11 +343,12 @@ public class CatalogJanitor extends ScheduledChore {
     Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
     Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
     if (hasNoReferences(a) && hasNoReferences(b)) {
-      LOG.debug("Deleting region " + parent.getRegionNameAsString() +
-        " because daughter splits no longer hold references");
-      FileSystem fs = this.services.getMasterStorage().getFileSystem();
-      if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
-      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Deleting region " + parent.getRegionNameAsString()
+            + " because daughter splits no longer hold references");
+      }
+      MasterStorage ms = this.services.getMasterStorage();
+      ms.archiveRegion(parent);
       MetaTableAccessor.deleteRegion(this.connection, parent);
       services.getServerManager().removeRegion(parent);
       result = true;
@@ -390,33 +382,21 @@ public class CatalogJanitor extends ScheduledChore {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
 
-    FileSystem fs = this.services.getMasterStorage().getFileSystem();
-    Path rootdir = ((LegacyPathIdentifier) this.services.getMasterStorage().getRootContainer())
-        .path;
-    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
-
-    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());
-
+    MasterStorage ms = this.services.getMasterStorage();
     try {
-      if (!FSUtils.isExists(fs, daughterRegionDir)) {
+      if (!ms.regionExists(daughter)) {
         return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
       }
     } catch (IOException ioe) {
       LOG.error("Error trying to determine if daughter region exists, " +
-               "assuming exists and has references", ioe);
+          "assuming exists and has references", ioe);
       return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
     }
 
     boolean references = false;
     try {
-      final RegionStorage regionFs = RegionStorage.open(this.services.getConfiguration(), daughter, false);
       final HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
-
-      for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
-        if ((references = regionFs.hasReferences(family.getNameAsString()))) {
-          break;
-        }
-      }
+      references = ms.getRegionStorage(daughter).hasReferences(parentDescriptor);
     } catch (IOException e) {
       LOG.error("Error trying to determine referenced files from : " + 
daughter.getEncodedName()
           + ", to: " + parent.getEncodedName() + " assuming has references", 
e);
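
A condensed sketch of the merged-region cleanup after this change (illustration only, not the committed code verbatim; the MasterServices wiring, logging, and ServerManager bookkeeping are elided, and the helper class below is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.fs.MasterStorage;

    final class MergeCleanupSketch {
      /**
       * Ask the storage layer whether the merged region still references its
       * parents; if not, archive both parents through MasterStorage (instead
       * of HFileArchiver plus a raw FileSystem) and clean up hbase:meta.
       */
      static boolean cleanMergedRegion(MasterStorage<?> ms, Connection connection,
          HTableDescriptor htd, HRegionInfo mergedRegion,
          HRegionInfo regionA, HRegionInfo regionB) throws IOException {
        if (ms.getRegionStorage(mergedRegion).hasReferences(htd)) {
          return false;  // merged region still holds references; keep the parents
        }
        ms.archiveRegion(regionA);
        ms.archiveRegion(regionB);
        MetaTableAccessor.deleteMergeQualifiers(connection, mergedRegion);
        return true;
      }
    }

The daughter check follows the same pattern: MasterStorage#regionExists replaces the FSUtils.isExists probe on the daughter directory, and RegionStorage#hasReferences(HTableDescriptor) replaces the per-family reference loop.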

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index f071f27..d9e1dc6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index ee232d5..a3945cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -33,16 +33,15 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.mockito.Mockito;
 
 import com.google.protobuf.Service;
 
@@ -103,7 +102,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public MasterFileSystem getMasterStorage() {
+  public MasterStorage getMasterStorage() {
     return null;
   }
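
Since MockNoopMasterServices now declares MasterStorage as the return type but still returns null, tests that actually exercise the storage path have to supply their own instance. One option, purely illustrative and not part of this commit (the class and method names below are hypothetical), is a Mockito stub:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.mockito.Mockito;

    final class StorageStubSketch {
      /** Build a MasterStorage stub that reports every region as present. */
      static MasterStorage<?> presentEverywhere() throws IOException {
        MasterStorage<?> storage = Mockito.mock(MasterStorage.class);
        Mockito.when(storage.regionExists(Mockito.any(HRegionInfo.class)))
            .thenReturn(true);
        return storage;
      }
    }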
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/300bdfd2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index fa8a74f..011c763 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -90,7 +90,7 @@ public class TestCatalogJanitor {
    */
   class MockMasterServices extends MockNoopMasterServices {
     private final ClusterConnection connection;
-    private final MasterStorage mfs;
+    private final MasterStorage ms;
     private final AssignmentManager asm;
     private final ServerManager sm;
 
@@ -134,7 +134,7 @@ public class TestCatalogJanitor {
       FSUtils.setRootDir(getConfiguration(), rootdir);
       Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
 
-      this.mfs = new MasterStorage(this);
+      this.ms = new MasterStorage(this);
       this.asm = Mockito.mock(AssignmentManager.class);
       this.sm = Mockito.mock(ServerManager.class);
     }
@@ -146,7 +146,7 @@ public class TestCatalogJanitor {
 
     @Override
     public MasterStorage getMasterStorage() {
-      return this.mfs;
+      return this.ms;
     }
 
     @Override
@@ -244,7 +244,7 @@ public class TestCatalogJanitor {
       // remove the parent.
       Result r = createResult(parent, splita, splitb);
       // Add a reference under splitA directory so we don't clear out the parent.
-      Path rootdir = services.getMasterStorage().getRootDir();
+      Path rootdir = services.getMasterStorage().getRootContainer();
       Path tabledir =
         FSUtils.getTableDir(rootdir, htd.getTableName());
       Path storedir = HStore.getStoreHomedir(tabledir, splita,
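
On the test side, getRootDir() gives way to getRootContainer(), which hands back a storage identifier rather than a Path. Where a concrete Path is still needed (as in the store-dir setup above), the legacy cast seen in the code this patch removes from CatalogJanitor still applies; a minimal sketch assuming the legacy storage implementation (the helper class and method name are hypothetical):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
    import org.apache.hadoop.hbase.master.MasterServices;
    import org.apache.hadoop.hbase.util.FSUtils;

    final class LegacyRootDirSketch {
      /** Resolve the on-disk table dir from the new root-container identifier. */
      static Path tableDir(MasterServices services, HTableDescriptor htd) {
        Path rootdir =
            ((LegacyPathIdentifier) services.getMasterStorage().getRootContainer()).path;
        return FSUtils.getTableDir(rootdir, htd.getTableName());
      }
    }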
