http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
deleted file mode 100644
index 508b4a7..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ /dev/null
@@ -1,1108 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-
-/**
- * View to an on-disk Region.
- * Provides the set of methods necessary to interact with the on-disk region data.
- */
-@InterfaceAudience.Private
-public class HRegionFileSystem {
-  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);
-
-  /** Name of the region info file that resides just under the region directory. */
-  public final static String REGION_INFO_FILE = ".regioninfo";
-
-  /** Temporary subdirectory of the region directory used for merges. */
-  public static final String REGION_MERGES_DIR = ".merges";
-
-  /** Temporary subdirectory of the region directory used for splits. */
-  public static final String REGION_SPLITS_DIR = ".splits";
-
-  /** Temporary subdirectory of the region directory used for compaction output. */
-  private static final String REGION_TEMP_DIR = ".tmp";
-
-  private final HRegionInfo regionInfo;
-  //regionInfo for interacting with FS (getting encodedName, etc)
-  private final HRegionInfo regionInfoForFs;
-  private final Configuration conf;
-  private final Path tableDir;
-  private final FileSystem fs;
-
-  /**
-   * In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the
-   * client level.
-   */
-  private final int hdfsClientRetriesNumber;
-  private final int baseSleepBeforeRetries;
-  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
-  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
-
-  /**
-   * Create a view to the on-disk region
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} that contains the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region
-   */
-  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    this.fs = fs;
-    this.conf = conf;
-    this.tableDir = tableDir;
-    this.regionInfo = regionInfo;
-    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
-    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
- }
-
-  /** @return the underlying {@link FileSystem} */
-  public FileSystem getFileSystem() {
-    return this.fs;
-  }
-
-  /** @return the {@link HRegionInfo} that describe this on-disk region view */
-  public HRegionInfo getRegionInfo() {
-    return this.regionInfo;
-  }
-
-  public HRegionInfo getRegionInfoForFS() {
-    return this.regionInfoForFs;
-  }
-
-  /** @return {@link Path} to the region's root directory. */
-  public Path getTableDir() {
-    return this.tableDir;
-  }
-
-  /** @return {@link Path} to the region directory. */
-  public Path getRegionDir() {
-    return new Path(this.tableDir, this.regionInfoForFs.getEncodedName());
-  }
-
-  // ===========================================================================
-  //  Temp Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the region's temp directory, used for file creations */
-  Path getTempDir() {
-    return new Path(getRegionDir(), REGION_TEMP_DIR);
-  }
-
-  /**
-   * Clean up any temp detritus that may have been left around from previous operation attempts.
-   */
-  void cleanupTempDir() throws IOException {
-    deleteDir(getTempDir());
-  }
-
-  // ===========================================================================
-  //  Store/StoreFile Helpers
-  // ===========================================================================
-  /**
-   * Returns the directory path of the specified family
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   */
-  public Path getStoreDir(final String familyName) {
-    return new Path(this.getRegionDir(), familyName);
-  }
-
-  /**
-   * Create the store directory for the specified family name
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   * @throws IOException if the directory creation fails.
-   */
-  Path createStoreDir(final String familyName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating "+storeDir);
-    return storeDir;
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
-    return getStoreFiles(Bytes.toString(familyName));
-  }
-
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
-    return getStoreFiles(familyName, true);
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
-    if (files == null) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No StoreFiles for: " + familyDir);
-      }
-      return null;
-    }
-
-    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
-    for (FileStatus status: files) {
-      if (validate && !StoreFileInfo.isValid(status)) {
-        LOG.warn("Invalid StoreFile: " + status.getPath());
-        continue;
-      }
-      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-        regionInfoForFs, familyName, status.getPath());
-      storeFiles.add(info);
-
-    }
-    return storeFiles;
-  }
-
-  /**
-   * Return Qualified Path of the specified family/file
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The qualified Path for the specified family/file
-   */
-  Path getStoreFilePath(final String familyName, final String fileName) {
-    Path familyDir = getStoreDir(familyName);
-    return new Path(familyDir, fileName).makeQualified(this.fs);
-  }
-
-  /**
-   * Return the store file information of the specified family/file.
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The {@link StoreFileInfo} for the specified family/file
-   */
-  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-      regionInfoForFs, familyName, new Path(familyDir, fileName));
-  }
-
-  /**
-   * Returns true if the specified family has reference files
-   * @param familyName Column Family Name
-   * @return true if family contains reference files
-   * @throws IOException
-   */
-  public boolean hasReferences(final String familyName) throws IOException {
-    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName));
-    if (files != null) {
-      for(FileStatus stat: files) {
-        if(stat.isDirectory()) {
-          continue;
-        }
-        if(StoreFileInfo.isReference(stat.getPath())) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Check whether region has Reference file
-   * @param htd table desciptor of the region
-   * @return true if region has reference file
-   * @throws IOException
-   */
-  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
-    for (HColumnDescriptor family : htd.getFamilies()) {
-      if (hasReferences(family.getNameAsString())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @return the set of families present on disk
-   * @throws IOException
-   */
-  public Collection<String> getFamilies() throws IOException {
-    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
-    if (fds == null) return null;
-
-    ArrayList<String> families = new ArrayList<String>(fds.length);
-    for (FileStatus status: fds) {
-      families.add(status.getPath().getName());
-    }
-
-    return families;
-  }
-
-  /**
-   * Remove the region family from disk, archiving the store files.
-   * @param familyName Column Family Name
-   * @throws IOException if an error occours during the archiving
-   */
-  public void deleteFamily(final String familyName) throws IOException {
-    // archive family store files
-    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
-
-    // delete the family folder
-    Path familyDir = getStoreDir(familyName);
-    if(fs.exists(familyDir) && !deleteDir(familyDir))
-      throw new IOException("Could not delete family " + familyName
-          + " from FileSystem for region " + 
regionInfoForFs.getRegionNameAsString() + "("
-          + regionInfoForFs.getEncodedName() + ")");
-  }
-
-  /**
-   * Generate a unique file name, used by createTempName() and commitStoreFile()
-   * @param suffix extra information to append to the generated name
-   * @return Unique file name
-   */
-  private static String generateUniqueName(final String suffix) {
-    String name = UUID.randomUUID().toString().replaceAll("-", "");
-    if (suffix != null) name += suffix;
-    return name;
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjuction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName() {
-    return createTempName(null);
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjuction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @param suffix extra information to append to the generated name
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName(final String suffix) {
-    return new Path(getTempDir(), generateUniqueName(suffix));
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
-    return commitStoreFile(familyName, buildPath, -1, false);
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @param seqNum Sequence Number to append to the file name (less then 0 if no sequence number)
-   * @param generateNewName False if you want to keep the buildPath name
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  private Path commitStoreFile(final String familyName, final Path buildPath,
-      final long seqNum, final boolean generateNewName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating " + storeDir);
-
-    String name = buildPath.getName();
-    if (generateNewName) {
-      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
-    }
-    Path dstPath = new Path(storeDir, name);
-    if (!fs.exists(buildPath)) {
-      throw new FileNotFoundException(buildPath.toString());
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Committing store file " + buildPath + " as " + dstPath);
-    }
-    // buildPath exists, therefore not doing an exists() check.
-    if (!rename(buildPath, dstPath)) {
-      throw new IOException("Failed rename of " + buildPath + " to " + 
dstPath);
-    }
-    return dstPath;
-  }
-
-
-  /**
-   * Moves multiple store files to the relative region's family store directory.
-   * @param storeFiles list of store files divided by family
-   * @throws IOException
-   */
-  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
-    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
-      String familyName = Bytes.toString(es.getKey());
-      for (StoreFile sf: es.getValue()) {
-        commitStoreFile(familyName, sf.getPath());
-      }
-    }
-  }
-
-  /**
-   * Archives the specified store file from the specified family.
-   * @param familyName Family that contains the store files
-   * @param filePath {@link Path} to the store file to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFile(final String familyName, final Path filePath)
-      throws IOException {
-    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), filePath);
-  }
-
-  /**
-   * Closes and archives the specified store files from the specified family.
-   * @param familyName Family that contains the store files
-   * @param storeFiles set of store files to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
-      throws IOException {
-    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), storeFiles);
-  }
-
-  /**
-   * Bulk load: Add a specified store file to the specified family.
-   * If the source file is on the same different file-system is moved from the
-   * source location to the destination location, otherwise is copied over.
-   *
-   * @param familyName Family that will gain the file
-   * @param srcPath {@link Path} to the file to import
-   * @param seqNum Bulk Load sequence number
-   * @return The destination {@link Path} of the bulk loaded file
-   * @throws IOException
-   */
-  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
-      throws IOException {
-    // Copy the file if it's on another filesystem
-    FileSystem srcFs = srcPath.getFileSystem(conf);
-    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
-
-    // We can't compare FileSystem instances as equals() includes UGI instance
-    // as part of the comparison and won't work when doing SecureBulkLoad
-    // TODO deal with viewFS
-    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
-      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than 
" +
-          "the destination store. Copying file over to destination 
filesystem.");
-      Path tmpPath = createTempName();
-      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
-      LOG.info("Copied " + srcPath + " to temporary path on destination 
filesystem: " + tmpPath);
-      srcPath = tmpPath;
-    }
-
-    return commitStoreFile(familyName, srcPath, seqNum, true);
-  }
-
-  // ===========================================================================
-  //  Splits Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the temp directory used during split operations */
-  Path getSplitsDir() {
-    return new Path(getRegionDir(), REGION_SPLITS_DIR);
-  }
-
-  Path getSplitsDir(final HRegionInfo hri) {
-    return new Path(getSplitsDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous split attempts.
-   */
-  void cleanupSplitsDir() throws IOException {
-    deleteDir(getSplitsDir());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous
-   * split attempts.
-   * Call this method on initial region deploy.
-   * @throws IOException
-   */
-  void cleanupAnySplitDetritus() throws IOException {
-    Path splitdir = this.getSplitsDir();
-    if (!fs.exists(splitdir)) return;
-    // Look at the splitdir.  It could have the encoded names of the daughter
-    // regions we tried to make.  See if the daughter regions actually got made
-    // out under the tabledir.  If here under splitdir still, then the split did
-    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
-    // where we successfully created daughter a but regionserver crashed during
-    // the creation of region b.  In this case, there'll be an orphan daughter
-    // dir in the filesystem.  TOOD: Fix.
-    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
-    if (daughters != null) {
-      for (FileStatus daughter: daughters) {
-        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
-        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
-          throw new IOException("Failed delete of " + daughterDir);
-        }
-      }
-    }
-    cleanupSplitsDir();
-    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
-  }
-
-  /**
-   * Remove daughter region
-   * @param regionInfo daughter {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
-    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
-    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Commit a daughter region, moving it from the split temporary directory
-   * to the proper location in the filesystem.
-   *
-   * @param regionInfo                 daughter {@link org.apache.hadoop.hbase.HRegionInfo}
-   * @throws IOException
-   */
-  Path commitDaughterRegion(final HRegionInfo regionInfo)
-      throws IOException {
-    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
-    Path daughterTmpDir = this.getSplitsDir(regionInfo);
-
-    if (fs.exists(daughterTmpDir)) {
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
-      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-
-      // Move the daughter temp dir to the table dir
-      if (!rename(daughterTmpDir, regionDir)) {
-        throw new IOException("Unable to rename " + daughterTmpDir + " to " + 
regionDir);
-      }
-    }
-
-    return regionDir;
-  }
-
-  /**
-   * Create the region splits directory.
-   */
-  void createSplitsDir() throws IOException {
-    Path splitdir = getSplitsDir();
-    if (fs.exists(splitdir)) {
-      LOG.info("The " + splitdir + " directory exists.  Hence deleting it to 
recreate it");
-      if (!deleteDir(splitdir)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating them again.");
-      }
-    }
-    // splitDir doesn't exists now. No need to do an exists() call for it.
-    if (!createDir(splitdir)) {
-      throw new IOException("Failed create of " + splitdir);
-    }
-  }
-
-  /**
-   * Write out a split reference. Package local so it doesnt leak out of
-   * regionserver.
-   * @param hri {@link HRegionInfo} of the destination
-   * @param familyName Column Family Name
-   * @param f File to split.
-   * @param splitRow Split Row
-   * @param top True if we are referring to the top half of the hfile.
-   * @param splitPolicy
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
-      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy)
-          throws IOException {
-    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
-      // Check whether the split row lies in the range of the store file
-      // If it is outside the range, return directly.
-      try {
-        if (top) {
-          //check if larger than last key.
-          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
-          Cell lastKey = f.getLastKey();
-          // If lastKey is null means storefile is empty.
-          if (lastKey == null) {
-            return null;
-          }
-          if (f.getComparator().compare(splitKey, lastKey) > 0) {
-            return null;
-          }
-        } else {
-          //check if smaller than first key
-          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
-          Cell firstKey = f.getFirstKey();
-          // If firstKey is null means storefile is empty.
-          if (firstKey == null) {
-            return null;
-          }
-          if (f.getComparator().compare(splitKey, firstKey) < 0) {
-            return null;
-          }
-        }
-      } finally {
-        f.closeReader(f.getCacheConf() != null ? f.getCacheConf().shouldEvictOnClose() : true);
-      }
-    }
-
-    Path splitDir = new Path(getSplitsDir(hri), familyName);
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above.  The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String parentRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
-    return r.write(fs, p);
-  }
-
-  // ===========================================================================
-  //  Merge Helpers
-  // ===========================================================================
-  /** @return {@link Path} to the temp directory used during merge operations */
-  Path getMergesDir() {
-    return new Path(getRegionDir(), REGION_MERGES_DIR);
-  }
-
-  Path getMergesDir(final HRegionInfo hri) {
-    return new Path(getMergesDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any merge detritus that may have been left around from previous merge attempts.
-   */
-  void cleanupMergesDir() throws IOException {
-    deleteDir(getMergesDir());
-  }
-
-  /**
-   * Remove merged region
-   * @param mergedRegion {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
-    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
-    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Create the region merges directory.
-   * @throws IOException If merges dir already exists or we fail to create it.
-   * @see HRegionFileSystem#cleanupMergesDir()
-   */
-  void createMergesDir() throws IOException {
-    Path mergesdir = getMergesDir();
-    if (fs.exists(mergesdir)) {
-      LOG.info("The " + mergesdir
-          + " directory exists.  Hence deleting it to recreate it");
-      if (!fs.delete(mergesdir, true)) {
-        throw new IOException("Failed deletion of " + mergesdir
-            + " before creating them again.");
-      }
-    }
-    if (!fs.mkdirs(mergesdir))
-      throw new IOException("Failed create of " + mergesdir);
-  }
-
-  /**
-   * Write out a merge reference under the given merges directory. Package local
-   * so it doesnt leak out of regionserver.
-   * @param mergedRegion {@link HRegionInfo} of the merged region
-   * @param familyName Column Family Name
-   * @param f File to create reference.
-   * @param mergedDir
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
-      final StoreFile f, final Path mergedDir)
-      throws IOException {
-    Path referenceDir = new Path(new Path(mergedDir,
-        mergedRegion.getEncodedName()), familyName);
-    // A whole reference to the store file.
-    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above. The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String mergingRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    Path p = new Path(referenceDir, f.getPath().getName() + "."
-        + mergingRegionName);
-    return r.write(fs, p);
-  }
-
-  /**
-   * Commit a merged region, moving it from the merges temporary directory to
-   * the proper location in the filesystem.
-   * @param mergedRegionInfo merged region {@link HRegionInfo}
-   * @throws IOException
-   */
-  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
-    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
-    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
-    // Move the tmp dir in the expected location
-    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
-      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
-        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
-            + regionDir);
-      }
-    }
-  }
-
-  // ===========================================================================
-  //  Create/Open/Delete Helpers
-  // ===========================================================================
-  /**
-   * Log the current state of the region
-   * @param LOG log to output information
-   * @throws IOException if an unexpected exception occurs
-   */
-  void logFileSystemState(final Log LOG) throws IOException {
-    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
-  }
-
-  /**
-   * @param hri
-   * @return Content of the file we write out to the filesystem under a region
-   * @throws IOException
-   */
-  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
-    return hri.toDelimitedByteArray();
-  }
-
-  /**
-   * Create a {@link HRegionInfo} from the serialized version on-disk.
-   * @param fs {@link FileSystem} that contains the Region Info file
-   * @param regionDir {@link Path} to the Region Directory that contains the Info file
-   * @return An {@link HRegionInfo} instance gotten from the Region Info file.
-   * @throws IOException if an error occurred during file open/read operation.
-   */
-  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
-      throws IOException {
-    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
-    try {
-      return HRegionInfo.parseFrom(in);
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * Write the .regioninfo file on-disk.
-   */
-  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
-      final Path regionInfoFile, final byte[] content) throws IOException {
-    // First check to get the permissions
-    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
-    // Write the RegionInfo file content
-    FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
-    try {
-      out.write(content);
-    } finally {
-      out.close();
-    }
-  }
-
-  /**
-   * Write out an info file under the stored region directory. Useful recovering mangled regions.
-   * If the regionInfo already exists on-disk, then we fast exit.
-   */
-  void checkRegionInfoOnFilesystem() throws IOException {
-    // Compose the content of the file so we can compare to length in filesystem. If not same,
-    // rewrite it (it may have been written in the old format using Writables instead of pb). The
-    // pb version is much shorter -- we write now w/o the toString version -- so checking length
-    // only should be sufficient. I don't want to read the file every time to check if it pb
-    // serialized.
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    try {
-      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-
-      FileStatus status = fs.getFileStatus(regionInfoFile);
-      if (status != null && status.getLen() == content.length) {
-        // Then assume the content good and move on.
-        // NOTE: that the length is not sufficient to define the the content matches.
-        return;
-      }
-
-      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
-      if (!fs.delete(regionInfoFile, false)) {
-        throw new IOException("Unable to remove existing " + regionInfoFile);
-      }
-    } catch (FileNotFoundException e) {
-      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
-          " on table " + regionInfo.getTable());
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    writeRegionInfoOnFilesystem(content, true);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful recovering mangled regions.
-   * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation.
-   */
-  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    writeRegionInfoOnFilesystem(content, useTempDir);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful recovering mangled regions.
-   * @param regionInfoContent serialized version of the {@link HRegionInfo}
-   * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation.
-   */
-  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
-      final boolean useTempDir) throws IOException {
-    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-    if (useTempDir) {
-      // Create in tmpDir and then move into place in case we crash after
-      // create but before close. If we don't successfully close the file,
-      // subsequent region reopens will fail the below because create is
-      // registered in NN.
-
-      // And then create the file
-      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
-
-      // If datanode crashes or if the RS goes down just before the close is called while trying to
-      // close the created regioninfo file in the .tmp directory then on next
-      // creation we will be getting AlreadyCreatedException.
-      // Hence delete and create the file if exists.
-      if (FSUtils.isExists(fs, tmpPath)) {
-        FSUtils.delete(fs, tmpPath, true);
-      }
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
-
-      // Move the created file to the original path
-      if (fs.exists(tmpPath) &&  !rename(tmpPath, regionInfoFile)) {
-        throw new IOException("Unable to rename " + tmpPath + " to " + 
regionInfoFile);
-      }
-    } else {
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-    }
-  }
-
-  /**
-   * Create a new Region on file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @throws IOException if the region creation fails due to a FileSystem exception.
-   */
-  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (fs.exists(regionDir)) {
-      LOG.warn("Trying to create a region that already exists on disk: " + 
regionDir);
-      throw new IOException("The specified region already exists on disk: " + 
regionDir);
-    }
-
-    // Create the region directory
-    if (!createDirOnFileSystem(fs, conf, regionDir)) {
-      LOG.warn("Unable to create the region directory: " + regionDir);
-      throw new IOException("Unable to create region directory: " + regionDir);
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    regionFs.writeRegionInfoOnFilesystem(false);
-    return regionFs;
-  }
-
-  /**
-   * Open Region from file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @param readOnly True if you don't want to edit the region data
-   * @throws IOException if the region creation fails due to a FileSystem exception.
-   */
-  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
-      throws IOException {
-    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!fs.exists(regionDir)) {
-      LOG.warn("Trying to open a region that do not exists on disk: " + 
regionDir);
-      throw new IOException("The specified region do not exists on disk: " + 
regionDir);
-    }
-
-    if (!readOnly) {
-      // Cleanup temporary directories
-      regionFs.cleanupTempDir();
-      regionFs.cleanupSplitsDir();
-      regionFs.cleanupMergesDir();
-
-      // if it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
-    }
-
-    return regionFs;
-  }
-
-  /**
-   * Remove the region from the table directory, archiving the region's hfiles.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to remove the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be deleted
-   * @throws IOException if the request cannot be completed
-   */
-  public static void deleteRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!fs.exists(regionDir)) {
-      LOG.warn("Trying to delete a region that do not exists on disk: " + 
regionDir);
-      return;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETING region " + regionDir);
-    }
-
-    // Archive region
-    Path rootDir = FSUtils.getRootDir(conf);
-    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
-
-    // Delete empty region dir
-    if (!fs.delete(regionDir, true)) {
-      LOG.warn("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Creates a directory. Assumes the user has already checked for this directory existence.
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
-   *         whether the directory exists or not, and returns true if it exists.
-   * @throws IOException
-   */
-  boolean createDir(Path dir) throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * Renames a directory. Assumes the user has already checked for this directory existence.
-   * @param srcpath
-   * @param dstPath
-   * @return true if rename is successful.
-   * @throws IOException
-   */
-  boolean rename(Path srcpath, Path dstPath) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.rename(srcpath, dstPath);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
-        // dir is not there, retry after some time.
-        try {
-          sleepBeforeRetry("Rename Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in rename", lastIOE);
-  }
-
-  /**
-   * Deletes a directory. Assumes the user has already checked for this directory existence.
-   * @param dir
-   * @return true if the directory is deleted.
-   * @throws IOException
-   */
-  boolean deleteDir(Path dir) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.delete(dir, true);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(dir)) return true;
-        // dir is there, retry deleting after some time.
-        try {
-          sleepBeforeRetry("Delete Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in DeleteDir", lastIOE);
-  }
-
-  /**
-   * sleeping logic; handles the interrupt exception.
-   */
-  private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
-    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
-  }
-
-  /**
-   * Creates a directory for a filesystem and configuration object. Assumes the user has already
-   * checked for this directory existence.
-   * @param fs
-   * @param conf
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
-   *         whether the directory exists or not, and returns true if it exists.
-   * @throws IOException
-   */
-  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
-      throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, 
hdfsClientRetriesNumber);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * sleeping logic for static methods; handles the interrupt exception. Keeping a static version
-   * for this to avoid re-looking for the integer values.
-   */
-  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
-      int hdfsClientRetriesNumber) throws InterruptedException {
-    if (sleepMultiplier > hdfsClientRetriesNumber) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(msg + ", retries exhausted");
-      }
-      return;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + 
sleepMultiplier);
-    }
-    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
-  }
-}
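
The createTempName()/commitStoreFile() javadoc above describes a write-then-rename protocol for store file creation. Spelled out as caller code, the flow looks roughly like this (a minimal sketch against the deleted class; the StoreFile.Writer step is elided and the family name "f" is illustrative only):

    // Sketch of the temp-write/commit flow from the createTempName() javadoc.
    // Assumes `regionFs` is an HRegionFileSystem for an open, writable region.
    Path tmpPath = regionFs.createTempName();      // unique name under <region>/.tmp
    // ... write the new store file at tmpPath via a StoreFile.Writer ...
    // Publish by rename into <region>/<family>/; rename() retries transient
    // NameNode failures up to "hdfs.client.retries.number" attempts, sleeping
    // "hdfs.client.sleep.before.retries" ms (scaled by attempt) between tries.
    Path committed = regionFs.commitStoreFile("f", tmpPath);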

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index c4bd849..e3b1a85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -127,7 +128,7 @@ public class HStore implements Store {
   // This stores directory in the filesystem.
   protected final HRegion region;
   private final HColumnDescriptor family;
-  private final HRegionFileSystem fs;
+  private final RegionFileSystem fs;
   protected Configuration conf;
   protected CacheConfig cacheConf;
   private long lastCompactSize = 0;
@@ -335,7 +336,7 @@ public class HStore implements Store {
     return this.fs.getFileSystem();
   }
 
-  public HRegionFileSystem getRegionFileSystem() {
+  public RegionFileSystem getRegionFileSystem() {
     return this.fs;
   }
 

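The HStore hunk above is the core of this change: the store now programs against the new org.apache.hadoop.hbase.fs.RegionFileSystem abstraction instead of the concrete HRegionFileSystem. Callers that only enumerate a family's files read the same either way; a sketch, assuming RegionFileSystem keeps the getStoreFiles() contract shown in the deleted HRegionFileSystem (null, not empty, when the family directory is missing):

    RegionFileSystem rfs = store.getRegionFileSystem();
    Collection<StoreFileInfo> infos = rfs.getStoreFiles("f");  // "f" is an example family
    if (infos != null) {  // null when no family directory exists yet
      for (StoreFileInfo info : infos) {
        // each entry wraps a validated file under <region>/<family>/
      }
    }
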
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 4a3f52f..3baa4d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.security.User;
  * transaction, {@link #execute(Server, RegionServerServices)} to run the
 * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if
  * execute fails.
- * 
+ *
  * <p>Here is an example of how you would use this interface:
  * <pre>
 *  RegionMergeTransactionFactory factory = new RegionMergeTransactionFactory(conf);
@@ -158,7 +158,7 @@ public interface RegionMergeTransaction {
    * @param services
    * @return <code>true</code> if the regions are mergeable else
 *         <code>false</code> if they are not (e.g. its already closed, etc.).
-   * @throws IOException 
+   * @throws IOException
    */
   boolean prepare(RegionServerServices services) throws IOException;
 
@@ -217,7 +217,7 @@ public interface RegionMergeTransaction {
   /**
    * Register a listener for transaction preparation, execution, and possibly
    * rollback phases.
-   * <p>A listener can abort a transaction by throwing an exception. 
+   * <p>A listener can abort a transaction by throwing an exception.
    * @param listener the listener
    * @return 'this' for chaining
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
index 9e7f97b..61c3eda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable;
 import org.apache.hadoop.hbase.security.User;
@@ -200,7 +201,7 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
           region_a.getRegionInfo().getRegionName());
       if (regionAHasMergeQualifier ||
           hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) {
-        LOG.debug("Region " + (regionAHasMergeQualifier ? 
+        LOG.debug("Region " + (regionAHasMergeQualifier ?
             region_a.getRegionInfo().getRegionNameAsString()
                 : region_b.getRegionInfo().getRegionNameAsString())
             + " is not mergeable because it has merge qualifier in META");
@@ -556,23 +557,19 @@ public class RegionMergeTransactionImpl implements RegionMergeTransaction {
       Map<byte[], List<StoreFile>> hstoreFilesOfRegionB)
       throws IOException {
     // Create reference file(s) of region A in mergdir
-    HRegionFileSystem fs_a = this.region_a.getRegionFileSystem();
-    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA
-        .entrySet()) {
+    RegionFileSystem fs_a = this.region_a.getRegionFileSystem();
+    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA.entrySet()) {
       String familyName = Bytes.toString(entry.getKey());
       for (StoreFile storeFile : entry.getValue()) {
-        fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
-            this.mergesdir);
+        fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, this.mergesdir);
       }
     }
     // Create reference file(s) of region B in mergedir
-    HRegionFileSystem fs_b = this.region_b.getRegionFileSystem();
-    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB
-        .entrySet()) {
+    RegionFileSystem fs_b = this.region_b.getRegionFileSystem();
+    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB.entrySet()) {
       String familyName = Bytes.toString(entry.getKey());
       for (StoreFile storeFile : entry.getValue()) {
-        fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
-            this.mergesdir);
+        fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, this.mergesdir);
       }
     }
   }
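
The two loops above write one merge reference per parent store file via mergeStoreFile(). In the deleted HRegionFileSystem this boils down to a whole-file reference dropped under the merged region's family directory (a sketch, with names as in the deleted mergeStoreFile() shown earlier):

    // A "whole file" reference: a top reference starting at the region's start key.
    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
    // Same hfile name plus the merging region's encoded name as a dot suffix,
    // written under <mergedDir>/<mergedRegion>/<family>/.
    Path p = new Path(referenceDir, f.getPath().getName() + "." + mergingRegionName);
    r.write(fs, p);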

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index 21c117f..e4aee69 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -662,7 +663,7 @@ public class SplitTransactionImpl implements SplitTransaction {
         LOG.debug("Splitting started for store file: " + sf.getPath() + " for 
region: " +
                   this.parent);
     }
-    HRegionFileSystem fs = this.parent.getRegionFileSystem();
+    RegionFileSystem fs = this.parent.getRegionFileSystem();
     String familyName = Bytes.toString(family);
     Path path_a =
         fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false,

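For context, splitStoreFile() lands in the reference-writing code of the deleted HRegionFileSystem: one half-file Reference per daughter, named so the daughter can resolve the referred-to parent file. Reduced to its essentials (a sketch, names as in the deleted splitStoreFile() above):

    // Top or bottom half reference, written under .splits/<daughter>/<family>/.
    Reference r = top ? Reference.createTopReference(splitRow)
                      : Reference.createBottomReference(splitRow);
    // The parent region's encoded name becomes a dot-separated suffix,
    // e.g. "<hfile-name>.<parentEncodedName>" (see the REF_NAME_REGEX comment).
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    r.write(fs, p);
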
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index a8542de..4cde73d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -778,9 +778,9 @@ public class RestoreSnapshotHelper {
     FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regionDirs == null) return null;
 
-    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
-    for (int i = 0; i < regionDirs.length; ++i) {
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
+    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+    for (FileStatus regionDir: regionDirs) {
+      HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
       regions.add(hri);
     }
     LOG.debug("found " + regions.size() + " regions for table=" +

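The last hunk above also shows the .regioninfo loader moving from HRegionFileSystem to RegionFileSystem; only the owning type changes. Rebuilt as a standalone sketch (assuming the static signature carries over from the deleted loadRegionInfoFileContent() unchanged):

    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
    if (regionDirs != null) {  // listStatus() yields null for an empty/missing dir
      for (FileStatus regionDir : regionDirs) {
        // Parse the protobuf-delimited HRegionInfo at <regionDir>/.regioninfo
        regions.add(RegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath()));
      }
    }
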
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 0409441..dfb706b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -250,8 +250,8 @@ public final class SnapshotManifest {
     boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
     try {
       // Open the RegionFS
-      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-            tableDir, regionInfo, true);
+      Path rootDir = null;
+      RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, rootDir, regionInfo, false);
       monitor.rethrowException();
 
       // 1. dump region meta info into the snapshot directory

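Note that the rewritten call passes a null rootDir where the old code passed tableDir, and flips the trailing flag from true to false; presumably the new RegionFileSystem resolves the region's directories from the configuration in this mode, but nothing in this hunk confirms that reading.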
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index a5afb91..2c5d76a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -62,8 +62,7 @@ public final class SnapshotManifestV1 {
   private SnapshotManifestV1() {
   }
 
-  static class ManifestBuilder implements SnapshotManifest.RegionVisitor<
-                                                          HRegionFileSystem, Path> {
+  static class ManifestBuilder implements SnapshotManifest.RegionVisitor<RegionFileSystem, Path> {
     private final Configuration conf;
     private final Path snapshotDir;
     private final FileSystem fs;
@@ -74,24 +73,24 @@ public final class SnapshotManifestV1 {
       this.fs = fs;
     }
 
-    public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
-      HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        fs, snapshotDir, regionInfo);
+    public RegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
+      RegionFileSystem snapshotRegionFs = RegionFileSystem.open(conf, fs,
+          snapshotDir, regionInfo, true);
       return snapshotRegionFs;
     }
 
-    public void regionClose(final HRegionFileSystem region) {
+    public void regionClose(final RegionFileSystem region) {
     }
 
-    public Path familyOpen(final HRegionFileSystem snapshotRegionFs, final byte[] familyName) {
+    public Path familyOpen(final RegionFileSystem snapshotRegionFs, final byte[] familyName) {
       Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName));
       return familyDir;
     }
 
-    public void familyClose(final HRegionFileSystem region, final Path family) {
+    public void familyClose(final RegionFileSystem region, final Path family) {
     }
 
-    public void storeFile(final HRegionFileSystem region, final Path familyDir,
+    public void storeFile(final RegionFileSystem region, final Path familyDir,
         final StoreFileInfo storeFile) throws IOException {
       Path referenceFile = new Path(familyDir, storeFile.getPath().getName());
       boolean success = true;
@@ -126,7 +125,7 @@ public final class SnapshotManifestV1 {
       completionService.submit(new Callable<SnapshotRegionManifest>() {
         @Override
         public SnapshotRegionManifest call() throws IOException {
-          HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
+          HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
           return buildManifestFromDisk(conf, fs, snapshotDir, hri);
         }
       });
@@ -156,8 +155,7 @@ public final class SnapshotManifestV1 {
 
   static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
-    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-          tableDir, regionInfo, true);
+    RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, tableDir, regionInfo, true);
     SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
 
     // 1. dump region meta info into the snapshot directory

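Taken together, the SnapshotManifest and SnapshotManifestV1 hunks replace two factories, HRegionFileSystem.createRegionOnFileSystem and HRegionFileSystem.openRegionFromFileSystem, with a single RegionFileSystem.open(conf, fs, dir, regionInfo, flag). Judging only from the call sites (true on the old create path, true or false on read paths), the trailing boolean appears to mean "bootstrap the region directory if absent"; that is an inference from this patch, not something the diff states. A sketch of the new shape (the OpenExample wrapper is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.fs.RegionFileSystem;

class OpenExample { // illustration only
  static Path storeDirFor(Configuration conf, FileSystem fs, Path tableDir,
      HRegionInfo regionInfo, String family) throws IOException {
    // One entry point replaces both openRegionFromFileSystem and
    // createRegionOnFileSystem; the trailing boolean is inferred to mean
    // "create the region directory if it does not exist yet".
    RegionFileSystem regionFs = RegionFileSystem.open(conf, fs, tableDir, regionInfo, true);
    return regionFs.getStoreDir(family); // per-family store dir, as in familyOpen above
  }
}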
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index e1a3fa2..2f34291 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -111,7 +111,9 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -120,7 +122,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
@@ -236,6 +237,8 @@ public class HBaseFsck extends Configured implements Closeable {
   // successful
   private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);
 
+  private final MasterFileSystem mfs;
+
   /***********
    * Options
    ***********/
@@ -348,8 +351,18 @@ public class HBaseFsck extends Configured implements Closeable {
   public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,
       ZooKeeperConnectionException, IOException, ClassNotFoundException {
     super(conf);
+    // make a copy, just to be sure we're not overriding someone else's config
+    setConf(HBaseConfiguration.create(getConf()));
+    // disable blockcache for tool invocation, see HBASE-10500
+    getConf().setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
+    // Disable usage of meta replicas in hbck
+    getConf().setBoolean(HConstants.USE_META_REPLICAS, false);
+
+    mfs = MasterFileSystem.open(getConf(), false);
+
     errors = getErrorReporter(getConf());
     this.executor = exec;
+
     lockFileRetryCounterFactory = new RetryCounterFactory(
       getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),
       getConf().getInt(
@@ -1075,7 +1088,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private void offlineReferenceFileRepair() throws IOException {
     clearState();
     LOG.info("Validating mapping using HDFS state");
-    final MasterFileSystem mfs = MasterFileSystem.open(getConf(), false);
     mfs.visitStoreFiles(new StoreFileVisitor() {
       @Override
       public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
@@ -1179,7 +1191,7 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     FileSystem fs = FileSystem.get(getConf());
-    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    HRegionInfo hri = RegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
     LOG.debug("HRegionInfo read: " + hri.toString());
     hbi.hdfsEntry.hri = hri;
   }
@@ -1412,7 +1424,7 @@ public class HBaseFsck extends Configured implements Closeable {
         Collections.<WALActionsListener>singletonList(new MetricsWAL()),
         "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8))).
         getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
-    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
+    HRegion meta = HRegion.createHRegion(c, rootdir, metaDescriptor, metaHRI, wal);
     MetaUtils.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
   }
@@ -2349,8 +2361,7 @@ public class HBaseFsck extends Configured implements Closeable {
                   LOG.warn(hri + " start and stop keys are in the range of " + region
                       + ". The region might not be cleaned up from hdfs when region " + region
                       + " split failed. Hence deleting from hdfs.");
-                  HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs,
-                    regionDir.getParent(), hri);
+                  RegionFileSystem.destroy(getConf(), fs, regionDir.getParent(), hri);
                   return;
                 }
               }
@@ -2612,7 +2623,7 @@ public class HBaseFsck extends Configured implements Closeable {
       Path src = cf.getPath();
       Path dst =  new Path(targetRegionDir, src.getName());
 
-      if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) {
+      if (src.getName().equals(LegacyLayout.REGION_INFO_FILE)) {
         // do not copy the old .regioninfo file.
         continue;
       }
@@ -4135,7 +4146,7 @@ public class HBaseFsck extends Configured implements Closeable {
               try {
                 LOG.debug("Loading region info from hdfs:"+ regionDir.getPath());
 
-                Path regioninfoFile = new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE);
+                Path regioninfoFile = new Path(he.hdfsRegionDir, LegacyLayout.REGION_INFO_FILE);
                 boolean regioninfoFileExists = fs.exists(regioninfoFile);
 
                 if (!regioninfoFileExists) {

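The fsck changes centralize filesystem walking: instead of hand-rolled FSUtils directory loops, the tool now opens a MasterFileSystem once in the constructor and hands a StoreFileVisitor to visitStoreFiles. A minimal sketch of that idiom as this patch uses it (the StoreFileWalk wrapper is hypothetical; the boolean to MasterFileSystem.open matches the constructor hunk above, though the diff does not spell out its meaning):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.fs.MasterFileSystem;
import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

class StoreFileWalk { // illustration only
  static void logAllStoreFiles(Configuration conf) throws IOException {
    // copy the conf, as the HBaseFsck constructor above does
    MasterFileSystem mfs = MasterFileSystem.open(HBaseConfiguration.create(conf), false);
    mfs.visitStoreFiles(new StoreFileVisitor() {
      @Override
      public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
          throws IOException {
        // callback fires once per store file, already resolved to region/family
        System.out.println(region.getEncodedName() + "/" + family + "/" + storeFile.getPath());
      }
    });
  }
}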
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index eaf8d54..a07fed9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -191,7 +191,7 @@ public class HBaseFsckRepair {
       HRegionInfo hri, HTableDescriptor htd) throws IOException {
     // Create HRegion
     Path root = FSUtils.getRootDir(conf);
-    HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
+    HRegion region = HRegion.createHRegion(conf, root, htd, hri, null);
 
     // Close the new region to flush to disk. Close log file too.
     region.close();

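This is the recurring signature change of the patch: HRegion.createHRegion(hri, rootDir, conf, htd, wal) becomes HRegion.createHRegion(conf, rootDir, htd, hri, wal[, initialize]). Only the argument order changes, and since every parameter keeps a distinct type the compiler flags any stale call site. A sketch of a migrated caller (the CreateRegionExample wrapper is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.FSUtils;

class CreateRegionExample { // illustration only
  static void createAndFlush(Configuration conf, HRegionInfo hri, HTableDescriptor htd)
      throws IOException {
    Path root = FSUtils.getRootDir(conf);
    // old order: createHRegion(hri, root, conf, htd, null)
    // new order: configuration first, region info just before the WAL
    HRegion region = HRegion.createHRegion(conf, root, htd, hri, null);
    // close the new region to flush it to disk, as HBaseFsckRepair does above
    region.close();
  }
}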
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index a936fc2..4fe39ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -170,7 +170,7 @@ public abstract class ModifyRegionUtils {
     // unless I pass along via the conf.
     Configuration confForWAL = new Configuration(conf);
     confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
-    HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, hTableDescriptor, null, false);
+    HRegion region = HRegion.createHRegion(conf, rootDir, hTableDescriptor, newRegion, null, false);
     try {
       // 2. Custom user code to interact with the created region
       if (task != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index a6f70c3..cbc57dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -412,7 +412,7 @@ public class RegionSplitter {
    * Alternative getCurrentNrHRS which is no longer available.
    * @param connection
    * @return Rough count of regionservers out on cluster.
-   * @throws IOException 
+   * @throws IOException
    */
   private static int getRegionServerCount(final Connection connection) throws IOException {
     try (Admin admin = connection.getAdmin()) {
@@ -727,8 +727,8 @@ public class RegionSplitter {
             if (sk.length == 0)
               sk = splitAlgo.firstRow();
 
-            HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
-                connection.getConfiguration(), fs, tableDir, hri, true);
+            RegionFileSystem regionFs = RegionFileSystem.open(
+              connection.getConfiguration(), fs, tableDir, hri, true);
 
             // Check every Column Family for that region -- check does not have references.
             boolean refFound = false;
@@ -767,7 +767,7 @@ public class RegionSplitter {
    * @param conf
    * @param tableName
    * @return A Pair where first item is table dir and second is the split file.
-   * @throws IOException 
+   * @throws IOException
    */
   private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
       final TableName tableName)
@@ -785,7 +785,7 @@ public class RegionSplitter {
       getTableDirAndSplitFile(connection.getConfiguration(), tableName);
     Path tableDir = tableDirAndSplitFile.getFirst();
     Path splitFile = tableDirAndSplitFile.getSecond();
- 
+
     FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
 
     // Using strings because (new byte[]{0}).equals(new byte[]{0}) == false

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 8335f4f..30ee2d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1815,7 +1815,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
       throws IOException {
-    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
+    return HRegion.createHRegion(getConfiguration(), getDataTestDir(), desc, info, wal);
   }
 
   /**
@@ -1828,7 +1828,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    * @param families
    * @throws IOException
    * @return A region on which you must call
-             {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    * @deprecated use
   * {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
    */
@@ -2347,7 +2347,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
       final Configuration conf, final HTableDescriptor htd, boolean initialize)
       throws IOException {
     WAL wal = createWal(conf, rootDir, info);
-    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
+    return HRegion.createHRegion(conf, rootDir, htd, info, wal, initialize);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index 0217b41..6c2c882 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -93,11 +94,9 @@ public class TestIOFencing {
     volatile CountDownLatch compactionsBlocked = new CountDownLatch(0);
     volatile CountDownLatch compactionsWaiting = new CountDownLatch(0);
 
-    @SuppressWarnings("deprecation")
-    public CompactionBlockerRegion(Path tableDir, WAL log,
-        FileSystem fs, Configuration confParam, HRegionInfo info,
-        HTableDescriptor htd, RegionServerServices rsServices) {
-      super(tableDir, log, fs, confParam, info, htd, rsServices);
+    public CompactionBlockerRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+        final WAL wal, final RegionServerServices rsServices) {
+      super(rfs, htd, wal, rsServices);
     }
 
     public void stopCompactions() {
@@ -154,11 +153,11 @@ public class TestIOFencing {
    */
   public static class BlockCompactionsInPrepRegion extends CompactionBlockerRegion {
 
-    public BlockCompactionsInPrepRegion(Path tableDir, WAL log,
-        FileSystem fs, Configuration confParam, HRegionInfo info,
-        HTableDescriptor htd, RegionServerServices rsServices) {
-      super(tableDir, log, fs, confParam, info, htd, rsServices);
+    public BlockCompactionsInPrepRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+        final WAL wal, final RegionServerServices rsServices) {
+      super(rfs, htd, wal, rsServices);
     }
+
     @Override
     protected void doRegionCompactionPrep() throws IOException {
       compactionsWaiting.countDown();
@@ -177,11 +176,11 @@ public class TestIOFencing {
    * entry to go the WAL before blocking, but blocks afterwards
    */
   public static class BlockCompactionsInCompletionRegion extends CompactionBlockerRegion {
-    public BlockCompactionsInCompletionRegion(Path tableDir, WAL log,
-        FileSystem fs, Configuration confParam, HRegionInfo info,
-        HTableDescriptor htd, RegionServerServices rsServices) {
-      super(tableDir, log, fs, confParam, info, htd, rsServices);
+    public BlockCompactionsInCompletionRegion(final RegionFileSystem rfs,
+        final HTableDescriptor htd, final WAL wal, final RegionServerServices rsServices) {
+      super(rfs, htd, wal, rsServices);
     }
+
     @Override
     protected HStore instantiateHStore(final HColumnDescriptor family) throws IOException {
       return new BlockCompactionsInCompletionHStore(this, family, this.conf);

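Test regions that subclass HRegion now take the four-argument constructor (RegionFileSystem, HTableDescriptor, WAL, RegionServerServices) in place of the seven-argument deprecated one; the path, filesystem, and configuration all travel inside the RegionFileSystem handle, which is why the @SuppressWarnings("deprecation") annotations disappear. A sketch of the minimal subclass shape under the new constructor (InstrumentedRegion is a hypothetical name):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.fs.RegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.wal.WAL;

// illustration only: the smallest HRegion subclass the new constructor allows
class InstrumentedRegion extends HRegion {
  public InstrumentedRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
      final WAL wal, final RegionServerServices rsServices) {
    super(rfs, htd, wal, rsServices); // tableDir/fs/conf now come from rfs
  }
}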
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 53de8a0..a5b0002 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -31,11 +31,14 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.fs.MasterFileSystem;
+import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -302,13 +305,14 @@ public class TestRestoreSnapshotFromClient {
 
   private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    Set<String> families = new HashSet<String>();
-    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
-      for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
-        families.add(familyDir.getName());
+    final Set<String> families = new HashSet<String>();
+    mfs.visitStoreFiles(tableName, new StoreFileVisitor() {
+      @Override
+      public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
+          throws IOException {
+        families.add(family);
       }
-    }
+    });
     return families;
   }
 

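The rewritten getFamiliesFromFS uses the table-scoped overload of visitStoreFiles, which is the general idiom this patch introduces for collecting any per-store-file attribute. A sketch of that idiom, mirroring the hunk above (the FamilyCollector wrapper is hypothetical):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.fs.MasterFileSystem;
import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

class FamilyCollector { // illustration only
  static Set<String> familiesOnDisk(MasterFileSystem mfs, TableName table) throws IOException {
    final Set<String> families = new HashSet<String>();
    // table-scoped overload: visits only store files under the given table
    mfs.visitStoreFiles(table, new StoreFileVisitor() {
      @Override
      public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
          throws IOException {
        families.add(family); // accumulate whatever attribute the caller needs
      }
    });
    return families;
  }
}

In the test itself, mfs comes from TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), as the hunk shows.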
http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 02bb0d3..fa4497f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.RegionFileSystem;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -153,7 +154,7 @@ public class TestRegionObserverScannerOpenHook {
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
     Path path = new Path(DIR + callingMethod);
     WAL wal = HBaseTestingUtility.createWal(conf, path, info);
-    HRegion r = HRegion.createHRegion(info, path, conf, htd, wal);
+    HRegion r = HRegion.createHRegion(conf, path, htd, info, wal);
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really
     // start a region server here, so just manually create cphost
@@ -220,11 +221,9 @@ public class TestRegionObserverScannerOpenHook {
   public static class CompactionCompletionNotifyingRegion extends HRegion {
     private static volatile CountDownLatch compactionStateChangeLatch = null;
 
-    @SuppressWarnings("deprecation")
-    public CompactionCompletionNotifyingRegion(Path tableDir, WAL log,
-        FileSystem fs, Configuration confParam, HRegionInfo info,
-        HTableDescriptor htd, RegionServerServices rsServices) {
-      super(tableDir, log, fs, confParam, info, htd, rsServices);
+    public CompactionCompletionNotifyingRegion(final RegionFileSystem rfs, final HTableDescriptor htd,
+        final WAL wal, final RegionServerServices rsServices) {
+      super(rfs, htd, wal, rsServices);
     }
 
     public CountDownLatch getCompactionStateChangeLatch() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 26e5897..89c8dfc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -26,10 +26,12 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@@ -125,26 +127,24 @@ public abstract class TableSnapshotInputFormatTestBase {
 
       testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir);
 
-      Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
-      for (Path regionDir : FSUtils.getRegionDirs(fs, FSUtils.getTableDir(rootDir, tableName))) {
-        for (Path storeDir : FSUtils.getFamilyDirs(fs, regionDir)) {
-          for (FileStatus status : fs.listStatus(storeDir)) {
-            System.out.println(status.getPath());
-            if (StoreFileInfo.isValid(status)) {
-              Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
-                tableName, regionDir.getName(), storeDir.getName());
-
-              Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName());
-              // assert back references directory is empty
-              assertFalse("There is a back reference in " + path, fs.exists(path));
-
-              path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName());
-              // assert back references directory is empty
-              assertFalse("There is a back reference in " + path, fs.exists(path));
-            }
-          }
+      UTIL.getHBaseCluster().getMaster().getMasterFileSystem().visitStoreFiles(tableName,
+          new StoreFileVisitor() {
+        @Override
+        public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
+            throws IOException {
+          Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
+            region.getTable(), region.getEncodedName(), family);
+
+          // assert back references directory is empty
+          Path storeDir = storeFile.getPath().getParent();
+          Path path = HFileLink.getBackReferencesDir(storeDir, storeFile.getPath().getName());
+          assertFalse("There is a back reference in " + path, fs.exists(path));
+
+          // assert back references directory is empty
+          path = HFileLink.getBackReferencesDir(archiveStoreDir, storeFile.getPath().getName());
+          assertFalse("There is a back reference in " + path, fs.exists(path));
         }
-      }
+      });
     } finally {
       UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
       UTIL.deleteTable(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/457ebd7b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index e9e8bdc..b7e980b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -41,11 +41,13 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.fs.RegionFileSystem.StoreFileVisitor;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -199,19 +201,20 @@ public class MasterProcedureTestingUtility {
   }
 
   public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
-      final String family) throws IOException {
+      final String deletedFamily) throws IOException {
     // verify htd
     HTableDescriptor htd = master.getTableDescriptors().get(tableName);
     assertTrue(htd != null);
-    assertFalse(htd.hasFamily(family.getBytes()));
+    assertFalse(htd.hasFamily(deletedFamily.getBytes()));
 
     // verify fs
-    final FileSystem fs = master.getMasterFileSystem().getFileSystem();
-    final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
-    for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-      final Path familyDir = new Path(regionDir, family);
-      assertFalse(family + " family dir should not exist", fs.exists(familyDir));
-    }
+    master.getMasterFileSystem().visitStoreFiles(tableName, new StoreFileVisitor() {
+      @Override
+      public void storeFile(HRegionInfo region, String family, StoreFileInfo storeFile)
+          throws IOException {
+        assertFalse(family + " family dir should not exist", family.equals(deletedFamily));
+      }
+    });
   }
 
   public static void validateColumnFamilyModification(final HMaster master,

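One behavioral nuance in the last hunk is worth noting: the old check asserted that no directory named after the deleted family exists, while the visitor-based check asserts that no store file is still attributed to that family. An empty leftover family directory would pass the new check but failed the old one; whether that relaxation is intended is not stated in the patch.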