http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b33c853..b87c8cd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.FilterWrapper;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -593,35 +594,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
    * extensions.  Instances of HRegion should be instantiated with the
    * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method.
    *
-   * @param tableDir qualified path of directory where region should be 
located,
-   * usually the table directory.
-   * @param wal The WAL is the outbound log for any updates to the HRegion
-   * The wal file is a logfile from the previous execution that's
-   * custom-computed for this HRegion. The HRegionServer computes and sorts the
-   * appropriate wal info for this HRegion. If there is a previous wal file
-   * (implying that the HRegion has been written-to before), then read it from
-   * the supplied path.
-   * @param fs is the filesystem.
-   * @param confParam is global configuration settings.
-   * @param regionInfo the HRegionInfo that describes the region
-   * @param htd the table descriptor
-   * @param rsServices reference to {@link RegionServerServices} or null
-   * @deprecated Use other constructors.
-   */
-  @Deprecated
-  public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
-      final Configuration confParam, final HRegionInfo regionInfo,
-      final HTableDescriptor htd, final RegionServerServices rsServices) {
-    this(HRegionFileSystem.create(confParam, fs, tableDir, regionInfo),
-      wal, confParam, htd, rsServices);
-  }
-
-  /**
-   * HRegion constructor. This constructor should only be used for testing and
-   * extensions.  Instances of HRegion should be instantiated with the
-   * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method.
-   *
    * @param fs is the filesystem.
    * @param wal The WAL is the outbound log for any updates to the HRegion
    * The wal file is a logfile from the previous execution that's
@@ -794,7 +766,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
     // Write HRI to a file in case we need to recover hbase:meta
     status.setStatus("Writing region info on filesystem");
-    fs.checkRegionInfoOnFilesystem();
+    fs.writeRecoveryCheckPoint();
 
     // Initialize all the HStores
     status.setStatus("Initializing all the Stores");
@@ -808,16 +780,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     if (this.writestate.writesEnabled) {
       // Remove temporary data left over from old regions
       status.setStatus("Cleaning up temporary data from old regions");
-      fs.cleanupTempDir();
-    }
-
-    if (this.writestate.writesEnabled) {
-      status.setStatus("Cleaning up detritus from prior splits");
-      // Get rid of any splits or merges that were lost in-progress.  Clean out
-      // these directories here on open.  We may be opening a region that was
-      // being split but we crashed in the middle of it all.
-      fs.cleanupAnySplitDetritus();
-      fs.cleanupMergesDir();
+      fs.cleanup();
     }
 
     // Initialize split policy
@@ -1041,7 +1004,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     HDFSBlocksDistribution hdfsBlocksDistribution = new 
HDFSBlocksDistribution();
     FileSystem fs = tablePath.getFileSystem(conf);
 
-    HRegionFileSystem regionFs = HRegionFileSystem.create(conf, fs, tablePath, 
regionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.create(conf, regionInfo);
     for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
       Collection<StoreFileInfo> storeFiles = 
regionFs.getStoreFiles(family.getNameAsString());
       if (storeFiles == null) continue;
@@ -5868,20 +5831,18 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
    * @param htd the table descriptor
    * @return the new instance
    */
-  static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs,
-      Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd,
-      RegionServerServices rsServices) {
+  static HRegion newHRegion(Configuration conf, HTableDescriptor htd,
+      HRegionInfo regionInfo, WAL wal, RegionServerServices rsServices) {
     try {
       @SuppressWarnings("unchecked")
       Class<? extends HRegion> regionClass =
           (Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, 
HRegion.class);
 
       Constructor<? extends HRegion> c =
-          regionClass.getConstructor(Path.class, WAL.class, FileSystem.class,
-              Configuration.class, HRegionInfo.class, HTableDescriptor.class,
-              RegionServerServices.class);
+          regionClass.getConstructor(Configuration.class, 
HTableDescriptor.class,
+              HRegionInfo.class, WAL.class, RegionServerServices.class);
 
-      return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, 
rsServices);
+      return c.newInstance(conf, htd, regionInfo, wal, rsServices);
     } catch (Throwable e) {
       // todo: what should I throw here?
       throw new IllegalStateException("Could not instantiate a region 
instance.", e);
@@ -5905,11 +5866,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     LOG.info("creating HRegion " + info.getTable().getNameAsString()
         + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
         " Table name == " + info.getTable().getNameAsString());
-    FileSystem fs = FileSystem.get(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
-    HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
-    HRegion region = HRegion.newHRegion(tableDir,
-        wal, fs, conf, info, hTableDescriptor, null);
+    HRegionFileSystem.create(conf, info);
+    HRegion region = HRegion.newHRegion(conf, hTableDescriptor, info, wal, 
null);
     if (initialize) {
       // If initializing, set the sequenceId. It is also required by 
WALPerformanceEvaluation when
       // verifying the WALEdits.
@@ -6086,7 +6044,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
     }
-    HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, 
rsServices);
+    HRegion r = HRegion.newHRegion(conf, htd, info, wal, rsServices);
     return r.openHRegion(reporter);
   }
 
@@ -6101,8 +6059,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   public static HRegion openHRegion(final HRegion other, final 
CancelableProgressable reporter)
       throws IOException {
     HRegionFileSystem regionFs = other.getRegionFileSystem();
-    HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), 
regionFs.getFileSystem(),
-        other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
+    HRegion r = newHRegion(other.baseConf, other.getTableDesc(),
+        other.getRegionInfo(), other.getWAL(), null);
     return r.openHRegion(reporter);
   }
 
@@ -6150,9 +6108,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
       LOG.debug("HRegion.Warming up region: " + info);
     }
 
-    Path rootDir = FSUtils.getRootDir(conf);
-    Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
-
     FileSystem fs = null;
     if (rsServices != null) {
       fs = rsServices.getFileSystem();
@@ -6161,7 +6116,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
       fs = FileSystem.get(conf);
     }
 
-    HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, 
rsServices);
+    HRegion r = HRegion.newHRegion(conf, htd, info, wal, rsServices);
     r.initializeWarmup(reporter);
     r.close();
   }
@@ -6195,8 +6150,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     fs.commitDaughterRegion(hri);
 
     // Create the daughter HRegion instance
-    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), 
fs.getFileSystem(),
-        this.getBaseConf(), hri, this.getTableDesc(), rsServices);
+    HRegion r = HRegion.newHRegion(this.getBaseConf(), this.getTableDesc(),
+        hri, this.getWAL(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
     return r;
@@ -6210,14 +6165,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
    */
   HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo,
       final HRegion region_b) throws IOException {
-    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(),
-        fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
-        this.getTableDesc(), this.rsServices);
-    r.readRequestsCount.set(this.getReadRequestsCount()
-        + region_b.getReadRequestsCount());
-    r.writeRequestsCount.set(this.getWriteRequestsCount()
-
-        + region_b.getWriteRequestsCount());
+    HRegion r = HRegion.newHRegion(this.getBaseConf(), this.getTableDesc(),
+        mergedRegionInfo, this.getWAL(), this.rsServices);
+    r.readRequestsCount.set(this.getReadRequestsCount() + 
region_b.getReadRequestsCount());
+    r.writeRequestsCount.set(this.getWriteRequestsCount() + 
region_b.getWriteRequestsCount());
     this.fs.commitMergedRegion(mergedRegionInfo);
     return r;
   }
@@ -6248,7 +6199,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
       Bytes.toBytes(HConstants.META_VERSION)));
     meta.put(row, HConstants.CATALOG_FAMILY, cells);
   }
-  
+
   /**
    * Determines if the specified row is within the row range specified by the
    * specified HRegionInfo
@@ -7342,9 +7293,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) {
       final WAL wal = walFactory.getMetaWAL(
           HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
-      region = HRegion.newHRegion(p, wal, fs, c,
-        HRegionInfo.FIRST_META_REGIONINFO,
-          fst.get(TableName.META_TABLE_NAME), null);
+      region = HRegion.newHRegion(c, fst.get(TableName.META_TABLE_NAME),
+        HRegionInfo.FIRST_META_REGIONINFO, wal, null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
deleted file mode 100644
index 9aeb5cd..0000000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ /dev/null
@@ -1,1216 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.fs.layout.FsLayout;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * View to an on-disk Region.
- * Provides the set of methods necessary to interact with the on-disk region 
data.
- */
[email protected]
-public class HRegionFileSystem {
-  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);
-
-  /** Name of the region info file that resides just under the region 
directory. */
-  public final static String REGION_INFO_FILE = ".regioninfo";
-
-  /** Temporary subdirectory of the region directory used for merges. */
-  public static final String REGION_MERGES_DIR = ".merges";
-
-  /** Temporary subdirectory of the region directory used for splits. */
-  public static final String REGION_SPLITS_DIR = ".splits";
-
-  /** Temporary subdirectory of the region directory used for compaction 
output. */
-  protected static final String REGION_TEMP_DIR = ".tmp";
-
-  protected final HRegionInfo regionInfo;
-  //regionInfo for interacting with FS (getting encodedName, etc)
-  protected final HRegionInfo regionInfoForFs;
-  protected final Configuration conf;
-  protected final Path tableDir;
-  protected final FileSystem fs;
-  
-  /** Number of characters for DIR name, 4 characters for 16^4 = 65536 
buckets. */
-  public static final int HUMONGOUS_DIR_NAME_SIZE = 4;
-
-  /**
-   * In order to handle NN connectivity hiccups, one need to retry 
non-idempotent operation at the
-   * client level.
-   */
-  protected final int hdfsClientRetriesNumber;
-  protected final int baseSleepBeforeRetries;
-  protected static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
-  protected static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
-
-  /**
-   * Use the static factory methods on this class for construction, unless you 
are an
-   * HRegionFileSystem subclass constructor or the HRegionFileSystemFactory.
-   * 
-   * Create a view to the on-disk region
-   * 
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} that contains the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region
-   */
-  protected HRegionFileSystem(final Configuration conf, final FileSystem fs, 
final Path tableDir,
-      final HRegionInfo regionInfo) {
-    this.fs = fs;
-    this.conf = conf;
-    this.tableDir = tableDir;
-    this.regionInfo = regionInfo;
-    this.regionInfoForFs = 
ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
-    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    this.baseSleepBeforeRetries = 
conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
- }
-
-  public static HRegionFileSystem create(final Configuration conf, final 
FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    return FsLayout.getHRegionFileSystemFactory().create(conf, fs, tableDir, 
regionInfo);
-  }
-  
-  /** @return the underlying {@link FileSystem} */
-  public FileSystem getFileSystem() {
-    return this.fs;
-  }
-
-  /** @return the {@link HRegionInfo} that describe this on-disk region view */
-  public HRegionInfo getRegionInfo() {
-    return this.regionInfo;
-  }
-
-  public HRegionInfo getRegionInfoForFS() {
-    return this.regionInfoForFs;
-  }
-
-  /** @return {@link Path} to the region's root directory. */
-  public Path getTableDir() {
-    return this.tableDir;
-  }
-
-  /** @return {@link Path} to the region directory. */
-  public Path getRegionDir() {
-    return FsLayout.getRegionDir(this.tableDir, this.regionInfoForFs);
-  }
-    
-  /** @return {@link Path} to the daughter region provided */
-  public Path getDaughterRegionDir(HRegionInfo daughter) {
-    return FsLayout.getRegionDir(this.tableDir, daughter);
-  }
-  
-  // 
===========================================================================
-  //  Temp Helpers
-  // 
===========================================================================
-  /** @return {@link Path} to the region's temp directory, used for file 
creations */
-  Path getTempDir() {
-    return new Path(getRegionDir(), REGION_TEMP_DIR);
-  }
-
-  /**
-   * Clean up any temp detritus that may have been left around from previous 
operation attempts.
-   */
-  void cleanupTempDir() throws IOException {
-    deleteDir(getTempDir());
-  }
-
-  // 
===========================================================================
-  //  Store/StoreFile Helpers
-  // 
===========================================================================
-  /**
-   * Returns the directory path of the specified family
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   */
-  public Path getStoreDir(final String familyName) {
-    return new Path(this.getRegionDir(), familyName);
-  }
-
-  /**
-   * Create the store directory for the specified family name
-   * @param familyName Column Family Name
-   * @return {@link Path} to the directory of the specified family
-   * @throws IOException if the directory creation fails.
-   */
-  Path createStoreDir(final String familyName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating "+storeDir);
-    return storeDir;
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) 
throws IOException {
-    return getStoreFiles(Bytes.toString(familyName));
-  }
-
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName) 
throws IOException {
-    return getStoreFiles(familyName, true);
-  }
-
-  /**
-   * Returns the store files available for the family.
-   * This methods performs the filtering based on the valid store files.
-   * @param familyName Column Family Name
-   * @return a set of {@link StoreFileInfo} for the specified family.
-   */
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName, 
final boolean validate)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
-    if (files == null) {
-      LOG.debug("No StoreFiles for: " + familyDir);
-      return null;
-    }
-
-    ArrayList<StoreFileInfo> storeFiles = new 
ArrayList<StoreFileInfo>(files.length);
-    for (FileStatus status: files) {
-      if (validate && !StoreFileInfo.isValid(status)) {
-        LOG.warn("Invalid StoreFile: " + status.getPath());
-        continue;
-      }
-      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, 
regionInfo,
-        regionInfoForFs, familyName, status.getPath());
-      storeFiles.add(info);
-
-    }
-    return storeFiles;
-  }
-
-  /**
-   * Return Qualified Path of the specified family/file
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The qualified Path for the specified family/file
-   */
-  Path getStoreFilePath(final String familyName, final String fileName) {
-    Path familyDir = getStoreDir(familyName);
-    return new Path(familyDir, fileName).makeQualified(this.fs);
-  }
-
-  /**
-   * Return the store file information of the specified family/file.
-   *
-   * @param familyName Column Family Name
-   * @param fileName File Name
-   * @return The {@link StoreFileInfo} for the specified family/file
-   */
-  StoreFileInfo getStoreFileInfo(final String familyName, final String 
fileName)
-      throws IOException {
-    Path familyDir = getStoreDir(familyName);
-    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-      regionInfoForFs, familyName, new Path(familyDir, fileName));
-  }
-  
-  void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, 
HRegionInfo daughter)
-      throws IOException {
-    Path splitsDir = getSplitsDir(daughter);
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != 
FSUtils.getRegionReferenceFileCount(getFileSystem(),
-          splitsDir)) {
-      throw new IOException("Failing split. Expected reference file count 
isn't equal.");
-    }
-  }
-
-  void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, 
HRegionInfo daughter)
-      throws IOException {
-    Path daughterRegionDir = FsLayout.getRegionDir(getTableDir(), daughter);
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != 
FSUtils.getRegionReferenceFileCount(getFileSystem(),
-          daughterRegionDir)) {
-      throw new IOException("Failing split. Expected reference file count 
isn't equal.");
-    }
-  }
-  
-  /**
-   * Returns true if the specified family has reference files
-   * @param familyName Column Family Name
-   * @return true if family contains reference files
-   * @throws IOException
-   */
-  public boolean hasReferences(final String familyName) throws IOException {
-    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
-        new FSUtils.ReferenceFileFilter(fs));
-    return files != null && files.length > 0;
-  }
-
-  /**
-   * Check whether region has Reference file
-   * @param htd table descriptor of the region
-   * @return true if region has reference file
-   * @throws IOException
-   */
-  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
-    for (HColumnDescriptor family : htd.getFamilies()) {
-      if (hasReferences(family.getNameAsString())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @return the set of families present on disk
-   * @throws IOException
-   */
-  public Collection<String> getFamilies() throws IOException {
-    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new 
FSUtils.FamilyDirFilter(fs));
-    if (fds == null) return null;
-
-    ArrayList<String> families = new ArrayList<String>(fds.length);
-    for (FileStatus status: fds) {
-      families.add(status.getPath().getName());
-    }
-
-    return families;
-  }
-
-  /**
-   * Remove the region family from disk, archiving the store files.
-   * @param familyName Column Family Name
-   * @throws IOException if an error occurs during the archiving
-   */
-  public void deleteFamily(final String familyName) throws IOException {
-    // archive family store files
-    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, 
Bytes.toBytes(familyName));
-
-    // delete the family folder
-    Path familyDir = getStoreDir(familyName);
-    if(fs.exists(familyDir) && !deleteDir(familyDir))
-      throw new IOException("Could not delete family " + familyName
-          + " from FileSystem for region " + 
regionInfoForFs.getRegionNameAsString() + "("
-          + regionInfoForFs.getEncodedName() + ")");
-  }
-
-  /**
-   * Generate a unique file name, used by createTempName() and 
commitStoreFile()
-   * @param suffix extra information to append to the generated name
-   * @return Unique file name
-   */
-  protected static String generateUniqueName(final String suffix) {
-    String name = UUID.randomUUID().toString().replaceAll("-", "");
-    if (suffix != null) name += suffix;
-    return name;
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName() {
-    return createTempName(null);
-  }
-
-  /**
-   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
-   * to get a safer file creation.
-   * <code>
-   * Path file = fs.createTempName();
-   * ...StoreFile.Writer(file)...
-   * fs.commitStoreFile("family", file);
-   * </code>
-   *
-   * @param suffix extra information to append to the generated name
-   * @return Unique {@link Path} of the temporary file
-   */
-  public Path createTempName(final String suffix) {
-    return new Path(getTempDir(), generateUniqueName(suffix));
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store 
directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  public Path commitStoreFile(final String familyName, final Path buildPath) 
throws IOException {
-    return commitStoreFile(familyName, buildPath, -1, false);
-  }
-
-  /**
-   * Move the file from a build/temp location to the main family store 
directory.
-   * @param familyName Family that will gain the file
-   * @param buildPath {@link Path} to the file to commit.
-   * @param seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
-   * @param generateNewName False if you want to keep the buildPath name
-   * @return The new {@link Path} of the committed file
-   * @throws IOException
-   */
-  protected Path commitStoreFile(final String familyName, final Path buildPath,
-      final long seqNum, final boolean generateNewName) throws IOException {
-    Path storeDir = getStoreDir(familyName);
-    if(!fs.exists(storeDir) && !createDir(storeDir))
-      throw new IOException("Failed creating " + storeDir);
-
-    String name = buildPath.getName();
-    if (generateNewName) {
-      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + 
"_");
-    }
-    Path dstPath = new Path(storeDir, name);
-    if (!fs.exists(buildPath)) {
-      throw new FileNotFoundException(buildPath.toString());
-    }
-    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
-    // buildPath exists, therefore not doing an exists() check.
-    if (!rename(buildPath, dstPath)) {
-      throw new IOException("Failed rename of " + buildPath + " to " + 
dstPath);
-    }
-    return dstPath;
-  }
-
-
-  /**
-   * Moves multiple store files to the relative region's family store 
directory.
-   * @param storeFiles list of store files divided by family
-   * @throws IOException
-   */
-  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws 
IOException {
-    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
-      String familyName = Bytes.toString(es.getKey());
-      for (StoreFile sf: es.getValue()) {
-        commitStoreFile(familyName, sf.getPath());
-      }
-    }
-  }
-
-  /**
-   * Archives the specified store file from the specified family.
-   * @param familyName Family that contains the store files
-   * @param filePath {@link Path} to the store file to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFile(final String familyName, final Path filePath)
-      throws IOException {
-    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), filePath);
-  }
-
-  /**
-   * Closes and archives the specified store files from the specified family.
-   * @param familyName Family that contains the store files
-   * @param storeFiles set of store files to remove
-   * @throws IOException if the archiving fails
-   */
-  public void removeStoreFiles(final String familyName, final 
Collection<StoreFile> storeFiles)
-      throws IOException {
-    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
-        this.tableDir, Bytes.toBytes(familyName), storeFiles);
-  }
-
-  /**
-   * Bulk load: Add a specified store file to the specified family.
-   * If the source file is on the same file-system it is moved from the
-   * source location to the destination location; otherwise it is copied over.
-   *
-   * @param familyName Family that will gain the file
-   * @param srcPath {@link Path} to the file to import
-   * @param seqNum Bulk Load sequence number
-   * @return The destination {@link Path} of the bulk loaded file
-   * @throws IOException
-   */
-  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
-      throws IOException {
-    // Copy the file if it's on another filesystem
-    FileSystem srcFs = srcPath.getFileSystem(conf);
-    FileSystem desFs = fs instanceof HFileSystem ? 
((HFileSystem)fs).getBackingFs() : fs;
-
-    // We can't compare FileSystem instances as equals() includes UGI instance
-    // as part of the comparison and won't work when doing SecureBulkLoad
-    // TODO deal with viewFS
-    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
-      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than 
" +
-          "the destination store. Copying file over to destination 
filesystem.");
-      Path tmpPath = createTempName();
-      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
-      LOG.info("Copied " + srcPath + " to temporary path on destination 
filesystem: " + tmpPath);
-      srcPath = tmpPath;
-    }
-
-    return commitStoreFile(familyName, srcPath, seqNum, true);
-  }
-
-  // 
===========================================================================
-  //  Splits Helpers
-  // 
===========================================================================
-  /** @return {@link Path} to the temp directory used during split operations 
*/
-  Path getSplitsDir() {
-    return new Path(getRegionDir(), REGION_SPLITS_DIR);
-  }
-
-  Path getSplitsDir(final HRegionInfo hri) {
-    return new Path(getSplitsDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous 
split attempts.
-   */
-  void cleanupSplitsDir() throws IOException {
-    deleteDir(getSplitsDir());
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous
-   * split attempts.
-   * Call this method on initial region deploy.
-   * @throws IOException
-   */
-  void cleanupAnySplitDetritus() throws IOException {
-    Path splitdir = this.getSplitsDir();
-    if (!fs.exists(splitdir)) return;
-    // Look at the splitdir.  It could have the encoded names of the daughter
-    // regions we tried to make.  See if the daughter regions actually got made
-    // out under the tabledir.  If here under splitdir still, then the split 
did
-    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
-    // where we successfully created daughter a but regionserver crashed during
-    // the creation of region b.  In this case, there'll be an orphan daughter
-    // dir in the filesystem.  TOOD: Fix.
-    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new 
FSUtils.DirFilter(fs));
-    if (daughters != null) {
-      for (FileStatus daughter: daughters) {
-        Path daughterDir = FsLayout.getRegionDir(getTableDir(), 
daughter.getPath().getName());
-        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
-          throw new IOException("Failed delete of " + daughterDir);
-        }
-      }
-    }
-    cleanupSplitsDir();
-    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
-  }
-
-  /**
-   * Remove daughter region
-   * @param regionInfo daughter {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo);
-    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Commit a daughter region, moving it from the split temporary directory
-   * to the proper location in the filesystem.
-   *
-   * @param regionInfo                 daughter {@link 
org.apache.hadoop.hbase.HRegionInfo}
-   * @throws IOException
-   */
-  Path commitDaughterRegion(final HRegionInfo regionInfo)
-      throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo);
-    Path daughterTmpDir = this.getSplitsDir(regionInfo);
-    
-    // 
-    // 
-    // /table/bucket2/parent/.splits/daughter/.regioninfo
-    // /table/bucket1
-
-    if (fs.exists(daughterTmpDir)) {
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
-      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-      
-      // Move the daughter region dir to its final place
-      moveNewRegionFromTmpDirToRegionDir(daughterTmpDir, regionDir);
-    }
-
-    return regionDir;
-  }
-  
-  /**
-   * Finalize the creation of a new region by moving it from a temporary 
staging
-   * directory to its final region directory in the table directory
-   * 
-   * Example: Moving /table/parent/.splits/daughter to /table/daughter for a 
new
-   * daughter region created from a region split
-   * 
-   * @param source  temporary staging directory
-   * @param dest    final region directory
-   * @throws IOException 
-   */
-  void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws 
IOException {
-    if (!rename(source, dest)) {
-      throw new IOException("Unable to rename " + source + " to " + dest);
-    }
-  }
-
-  /**
-   * Create the region splits directory.
-   */
-  void createSplitsDir() throws IOException {
-    Path splitdir = getSplitsDir();
-    if (fs.exists(splitdir)) {
-      LOG.info("The " + splitdir + " directory exists.  Hence deleting it to 
recreate it");
-      if (!deleteDir(splitdir)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating them again.");
-      }
-    }
-    // splitDir doesn't exists now. No need to do an exists() call for it.
-    if (!createDir(splitdir)) {
-      throw new IOException("Failed create of " + splitdir);
-    }
-  }
-
-  /**
-   * Write out a split reference. Package local so it doesnt leak out of
-   * regionserver.
-   * @param hri {@link HRegionInfo} of the destination
-   * @param familyName Column Family Name
-   * @param f File to split.
-   * @param splitRow Split Row
-   * @param top True if we are referring to the top half of the hfile.
-   * @param splitPolicy
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path splitStoreFile(final HRegionInfo hri, final String familyName, final 
StoreFile f,
-      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy)
-          throws IOException {
-
-    if (splitPolicy == null || 
!splitPolicy.skipStoreFileRangeCheck(familyName)) {
-      // Check whether the split row lies in the range of the store file
-      // If it is outside the range, return directly.
-      try {
-        if (top) {
-          //check if larger than last key.
-          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
-          byte[] lastKey = f.createReader().getLastKey();
-          // If lastKey is null means storefile is empty.
-          if (lastKey == null) {
-            return null;
-          }
-          if (f.getReader().getComparator().compare(splitKey, lastKey, 0, 
lastKey.length) > 0) {
-            return null;
-          }
-        } else {
-          //check if smaller than first key
-          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
-          Cell firstKey = f.createReader().getFirstKey();
-          // If firstKey is null means storefile is empty.
-          if (firstKey == null) {
-            return null;
-          }
-          if (f.getReader().getComparator().compare(splitKey, firstKey) < 0) {
-            return null;
-          }
-        }
-      } finally {
-        f.closeReader(true);
-      }
-    }
-
-    Path splitDir = new Path(getSplitsDir(hri), familyName);
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top ? Reference.createTopReference(splitRow): 
Reference.createBottomReference(splitRow);
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above.  The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String parentRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    return createReferenceFile(r, f, parentRegionName, splitDir);
-  }
-  
-  Path createReferenceFile(Reference r, StoreFile f, String 
originalRegionName, Path targetDir) throws IOException {
-    Path p = new Path(targetDir, f.getPath().getName() + "." + 
originalRegionName);
-    return r.write(fs, p);
-  }
-
-  // 
===========================================================================
-  //  Merge Helpers
-  // 
===========================================================================
-  /** @return {@link Path} to the temp directory used during merge operations 
*/
-  Path getMergesDir() {
-    return new Path(getRegionDir(), REGION_MERGES_DIR);
-  }
-
-  Path getMergesDir(final HRegionInfo hri) {
-    return new Path(getMergesDir(), hri.getEncodedName());
-  }
-
-  /**
-   * Clean up any merge detritus that may have been left around from previous 
merge attempts.
-   */
-  void cleanupMergesDir() throws IOException {
-    deleteDir(getMergesDir());
-  }
-
-  /**
-   * Remove merged region
-   * @param mergedRegion {@link HRegionInfo}
-   * @throws IOException
-   */
-  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegion);
-    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Create the region merges directory.
-   * @throws IOException If merges dir already exists or we fail to create it.
-   * @see HRegionFileSystem#cleanupMergesDir()
-   */
-  void createMergesDir() throws IOException {
-    Path mergesdir = getMergesDir();
-    if (fs.exists(mergesdir)) {
-      LOG.info("The " + mergesdir
-          + " directory exists.  Hence deleting it to recreate it");
-      if (!fs.delete(mergesdir, true)) {
-        throw new IOException("Failed deletion of " + mergesdir
-            + " before creating them again.");
-      }
-    }
-    if (!fs.mkdirs(mergesdir))
-      throw new IOException("Failed create of " + mergesdir);
-  }
-
-  /**
-   * Write out a merge reference under the given merges directory. Package 
local
-   * so it doesnt leak out of regionserver.
-   * @param mergedRegion {@link HRegionInfo} of the merged region
-   * @param familyName Column Family Name
-   * @param f File to create reference.
-   * @param mergedDir
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
-      final StoreFile f, final Path mergedDir)
-      throws IOException {
-    Path referenceDir = new Path(new Path(mergedDir,
-        mergedRegion.getEncodedName()), familyName);
-    // A whole reference to the store file.
-    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above. The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String mergingRegionName = regionInfoForFs.getEncodedName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    return createReferenceFile(r, f, mergingRegionName, referenceDir);
-  }
-
-  /**
-   * Commit a merged region, moving it from the merges temporary directory to
-   * the proper location in the filesystem.
-   * @param mergedRegionInfo merged region {@link HRegionInfo}
-   * @throws IOException
-   */
-  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws 
IOException {
-    Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegionInfo);
-    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
-    // Move the tmp dir in the expected location
-    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
-      moveNewRegionFromTmpDirToRegionDir(mergedRegionTmpDir, regionDir);
-    }
-  }
-
-  // 
===========================================================================
-  //  Create/Open/Delete Helpers
-  // 
===========================================================================
-  /**
-   * Log the current state of the region
-   * @param LOG log to output information
-   * @throws IOException if an unexpected exception occurs
-   */
-  void logFileSystemState(final Log LOG) throws IOException {
-    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
-  }
-
-  /**
-   * @param hri
-   * @return Content of the file we write out to the filesystem under a region
-   * @throws IOException
-   */
-  protected static byte[] getRegionInfoFileContent(final HRegionInfo hri) 
throws IOException {
-    return hri.toDelimitedByteArray();
-  }
-
-  /**
-   * Create a {@link HRegionInfo} from the serialized version on-disk.
-   * @param fs {@link FileSystem} that contains the Region Info file
-   * @param regionDir {@link Path} to the Region Directory that contains the 
Info file
-   * @return An {@link HRegionInfo} instance gotten from the Region Info file.
-   * @throws IOException if an error occurred during file open/read operation.
-   */
-  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, 
final Path regionDir)
-      throws IOException {
-    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
-    try {
-      return HRegionInfo.parseFrom(in);
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * Write the .regioninfo file on-disk.
-   */
-  protected static void writeRegionInfoFileContent(final Configuration conf, 
final FileSystem fs,
-      final Path regionInfoFile, final byte[] content) throws IOException {
-    // First check to get the permissions
-    FsPermission perms = FSUtils.getFilePermissions(fs, conf, 
HConstants.DATA_FILE_UMASK_KEY);
-    // Write the RegionInfo file content
-    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
-    try {
-      out.write(content);
-    } finally {
-      out.close();
-    }
-  }
-
-  /**
-   * Write out an info file under the stored region directory. Useful 
recovering mangled regions.
-   * If the regionInfo already exists on-disk, then we fast exit.
-   */
-  void checkRegionInfoOnFilesystem() throws IOException {
-    // Compose the content of the file so we can compare to length in 
filesystem. If not same,
-    // rewrite it (it may have been written in the old format using Writables 
instead of pb). The
-    // pb version is much shorter -- we write now w/o the toString version -- 
so checking length
-    // only should be sufficient. I don't want to read the file every time to 
check if it pb
-    // serialized.
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    try {
-      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-
-      FileStatus status = fs.getFileStatus(regionInfoFile);
-      if (status != null && status.getLen() == content.length) {
-        // Then assume the content good and move on.
-        // NOTE: that the length is not sufficient to define the the content 
matches.
-        return;
-      }
-
-      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
-      if (!fs.delete(regionInfoFile, false)) {
-        throw new IOException("Unable to remove existing " + regionInfoFile);
-      }
-    } catch (FileNotFoundException e) {
-      LOG.warn(REGION_INFO_FILE + " file not found for region: " + 
regionInfoForFs.getEncodedName() +
-          " on table " + regionInfo.getTable());
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    writeRegionInfoOnFilesystem(content, true);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful recovering 
mangled regions.
-   * @param useTempDir indicate whether or not using the region .tmp dir for a 
safer file creation.
-   */
-  protected void writeRegionInfoOnFilesystem(boolean useTempDir) throws 
IOException {
-    byte[] content = getRegionInfoFileContent(regionInfoForFs);
-    writeRegionInfoOnFilesystem(content, useTempDir);
-  }
-
-  /**
-   * Write out an info file under the region directory. Useful recovering 
mangled regions.
-   * @param regionInfoContent serialized version of the {@link HRegionInfo}
-   * @param useTempDir indicate whether or not using the region .tmp dir for a 
safer file creation.
-   */
-  protected void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
-      final boolean useTempDir) throws IOException {
-    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
-    if (useTempDir) {
-      // Create in tmpDir and then move into place in case we crash after
-      // create but before close. If we don't successfully close the file,
-      // subsequent region reopens will fail the below because create is
-      // registered in NN.
-
-      // And then create the file
-      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
-
-      // If datanode crashes or if the RS goes down just before the close is 
called while trying to
-      // close the created regioninfo file in the .tmp directory then on next
-      // creation we will be getting AlreadyCreatedException.
-      // Hence delete and create the file if exists.
-      if (FSUtils.isExists(fs, tmpPath)) {
-        FSUtils.delete(fs, tmpPath, true);
-      }
-
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
-
-      // Move the created file to the original path
-      if (fs.exists(tmpPath) &&  !rename(tmpPath, regionInfoFile)) {
-        throw new IOException("Unable to rename " + tmpPath + " to " + 
regionInfoFile);
-      }
-    } else {
-      // Write HRI to a file in case we need to recover hbase:meta
-      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
-    }
-  }
-
-  /**
-   * Create a new Region on file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @param humongousTable
-   * @throws IOException if the region creation fails due to a FileSystem 
exception.
-   */
-  public static HRegionFileSystem createRegionOnFileSystem(
-      final Configuration conf, final FileSystem fs, final Path tableDir,
-      final HRegionInfo regionInfo)
-      throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (fs.exists(regionDir)) {
-      LOG.warn("Trying to create a region that already exists on disk: " + 
regionDir);
-      throw new IOException("The specified region already exists on disk: " + 
regionDir);
-    }
-
-    // Create the region directory
-    if (!createDirOnFileSystem(fs, conf, regionDir)) {
-      LOG.warn("Unable to create the region directory: " + regionDir);
-      throw new IOException("Unable to create region directory: " + regionDir);
-    }
-
-    // Write HRI to a file in case we need to recover hbase:meta
-    regionFs.writeRegionInfoOnFilesystem(false);
-    return regionFs;
-  }
-  
-  /**
-   * Call this only if you don't have the HRegionInfo in memory.
-   * This method will load it from disk.
-   * 
-   * @param conf
-   * @param fs
-   * @param tableDir
-   * @param encodedRegionName
-   * @param readOnly
-   * @return
-   * @throws IOException
-   */
-  public static HRegionFileSystem openRegionFromFileSystem(final Configuration 
conf,
-      final FileSystem fs, final Path tableDir, final String 
encodedRegionName, boolean readOnly)
-      throws IOException {
-    Path regionDir = FsLayout.getRegionDir(tableDir, encodedRegionName);
-    HRegionInfo hri = loadRegionInfoFileContent(fs, regionDir);
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, hri);
-
-    if (!regionFs.existsOnDisk()) {
-      LOG.warn("Trying to open a region that do not exists on disk: " + 
regionDir);
-      throw new IOException("The specified region do not exists on disk: " + 
regionDir);
-    }
-
-    if (!readOnly) {
-      // Cleanup temporary directories
-      regionFs.cleanupTempDir();
-      regionFs.cleanupSplitsDir();
-      regionFs.cleanupMergesDir();
-
-      // if it doesn't exists, Write HRI to a file, in case we need to recover 
hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
-    }
-
-    return regionFs;
-  }
-
-  /**
-   * Open Region from file-system.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to add the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be added
-   * @param readOnly True if you don't want to edit the region data
-   * @throws IOException if the region creation fails due to a FileSystem 
exception.
-   */
-  public static HRegionFileSystem openRegionFromFileSystem(final Configuration 
conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, 
boolean readOnly)
-      throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!regionFs.existsOnDisk()) {
-      LOG.warn("Trying to open a region that do not exists on disk: " + 
regionDir);
-      throw new IOException("The specified region do not exists on disk: " + 
regionDir);
-    }
-
-    if (!readOnly) {
-      // Cleanup temporary directories
-      regionFs.cleanupTempDir();
-      regionFs.cleanupSplitsDir();
-      regionFs.cleanupMergesDir();
-
-      // if it doesn't exists, Write HRI to a file, in case we need to recover 
hbase:meta
-      regionFs.checkRegionInfoOnFilesystem();
-    }
-
-    return regionFs;
-  }
-  
-  /**
-   * Does the region directory for this HRFS instance exist on disk
-   * @return true if the region directory exists
-   * @throws IOException
-   */
-  public boolean existsOnDisk() throws IOException {
-    Path regionDir = getRegionDir();
-    return fs.exists(regionDir);
-  }
-  
-  /**
-   * Delete the region directory if exists.
-   * @param conf
-   * @param hri
-   * @return True if deleted the region directory.
-   * @throws IOException
-   */
-  public static boolean deleteRegionDir(final Configuration conf, final 
HRegionInfo hri)
-  throws IOException {
-    Path rootDir = FSUtils.getRootDir(conf);
-    FileSystem fs = rootDir.getFileSystem(conf);
-    return FSUtils.deleteDirectory(fs,
-      FsLayout.getRegionDir(FSUtils.getTableDir(rootDir, hri.getTable()), 
hri.getEncodedName()));
-  }
-  
-  /**
-   * Remove the region from the table directory, archiving the region's hfiles.
-   * @param conf the {@link Configuration} to use
-   * @param fs {@link FileSystem} from which to remove the region
-   * @param tableDir {@link Path} to where the table is being stored
-   * @param regionInfo {@link HRegionInfo} for region to be deleted
-   * @throws IOException if the request cannot be completed
-   */
-  public static void deleteAndArchiveRegionFromFileSystem(final Configuration 
conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) 
throws IOException {
-    HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo);
-    Path regionDir = regionFs.getRegionDir();
-
-    if (!fs.exists(regionDir)) {
-      LOG.warn("Trying to delete a region that do not exists on disk: " + 
regionDir);
-      return;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETING region " + regionDir);
-    }
-
-    // Archive region
-    Path rootDir = FSUtils.getRootDir(conf);
-    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
-
-    // Delete empty region dir
-    if (!fs.delete(regionDir, true)) {
-      LOG.warn("Failed delete of " + regionDir);
-    }
-  }
-
-  /**
-   * Creates a directory. Assumes the user has already checked for this 
directory existence.
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an 
IOException, it checks
-   *         whether the directory exists or not, and returns true if it 
exists.
-   * @throws IOException
-   */
-  boolean createDir(Path dir) throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * Renames a directory. Assumes the user has already checked for this 
directory existence.
-   * @param srcpath
-   * @param dstPath
-   * @return true if rename is successful.
-   * @throws IOException
-   */
-  boolean rename(Path srcpath, Path dstPath) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.rename(srcpath, dstPath);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // 
successful move
-        // dir is not there, retry after some time.
-        try {
-          sleepBeforeRetry("Rename Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in rename", lastIOE);
-  }
-
-  /**
-   * Deletes a directory. Assumes the user has already checked for this 
directory existence.
-   * @param dir
-   * @return true if the directory is deleted.
-   * @throws IOException
-   */
-  boolean deleteDir(Path dir) throws IOException {
-    IOException lastIOE = null;
-    int i = 0;
-    do {
-      try {
-        return fs.delete(dir, true);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (!fs.exists(dir)) return true;
-        // dir is there, retry deleting after some time.
-        try {
-          sleepBeforeRetry("Delete Directory", i+1);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in DeleteDir", lastIOE);
-  }
-
-  /**
-   * sleeping logic; handles the interrupt exception.
-   */
-  protected void sleepBeforeRetry(String msg, int sleepMultiplier) throws 
InterruptedException {
-    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, 
hdfsClientRetriesNumber);
-  }
-
-  /**
-   * Creates a directory for a filesystem and configuration object. Assumes 
the user has already
-   * checked for this directory existence.
-   * @param fs
-   * @param conf
-   * @param dir
-   * @return the result of fs.mkdirs(). In case underlying fs throws an 
IOException, it checks
-   *         whether the directory exists or not, and returns true if it 
exists.
-   * @throws IOException
-   */
-  protected static boolean createDirOnFileSystem(FileSystem fs, Configuration 
conf, Path dir)
-      throws IOException {
-    int i = 0;
-    IOException lastIOE = null;
-    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
-      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
-    int baseSleepBeforeRetries = 
conf.getInt("hdfs.client.sleep.before.retries",
-      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
-    do {
-      try {
-        return fs.mkdirs(dir);
-      } catch (IOException ioe) {
-        lastIOE = ioe;
-        if (fs.exists(dir)) return true; // directory is present
-        try {
-          sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, 
hdfsClientRetriesNumber);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-        }
-      }
-    } while (++i <= hdfsClientRetriesNumber);
-
-    throw new IOException("Exception in createDir", lastIOE);
-  }
-
-  /**
-   * sleeping logic for static methods; handles the interrupt exception. 
Keeping a static version
-   * for this to avoid re-looking for the integer values.
-   */
-  protected static void sleepBeforeRetry(String msg, int sleepMultiplier, int 
baseSleepBeforeRetries,
-      int hdfsClientRetriesNumber) throws InterruptedException {
-    if (sleepMultiplier > hdfsClientRetriesNumber) {
-      LOG.debug(msg + ", retries exhausted");
-      return;
-    }
-    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + 
sleepMultiplier);
-    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
deleted file mode 100644
index d278624..0000000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-public class HRegionFileSystemFactory {
-  public HRegionFileSystem create(final Configuration conf, final FileSystem 
fs, final Path tableDir,
-      final HRegionInfo regionInfo) {
-    return new HRegionFileSystem(conf, fs, tableDir, regionInfo);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 85eac25..e15f61a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -208,7 +209,7 @@ public class HStore implements Store {
     this.fs = region.getRegionFileSystem();
 
     // Assemble the store's home directory and Ensure it exists.
-    fs.createStoreDir(family.getNameAsString());
+    fs.openFamily(family.getNameAsString());
     this.region = region;
     this.family = family;
     // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
@@ -1197,8 +1198,7 @@ public class HStore implements Store {
       // Ready to go. Have list of files to compact.
       LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) 
in "
           + this + " of " + this.getRegionInfo().getRegionNameAsString()
-          + " into tmpdir=" + fs.getTempDir() + ", totalSize="
-          + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
+          + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), 
"", 1));
 
       // Commence the compaction.
       List<Path> newFiles = compaction.compact(throughputController);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
deleted file mode 100644
index 5378a69..0000000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.fs.layout.StandardHBaseFsLayout;
-
-import com.google.common.annotations.VisibleForTesting;
-
-public class HierarchicalHRegionFileSystem extends HRegionFileSystem {
-  protected HierarchicalHRegionFileSystem(Configuration conf, FileSystem fs, 
Path tableDir,
-      HRegionInfo regionInfo) {
-    super(conf, fs, tableDir, regionInfo);
-  }
-
-  @Override
-  void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws 
IOException {
-    fs.mkdirs(dest.getParent());
-    super.moveNewRegionFromTmpDirToRegionDir(source, dest);
-  }
-
-  // Probably will never use this function for real, just in tests to compare
-  // humongous vs regular region dir functionality
-  @VisibleForTesting
-  public Path getStandadHBaseRegionDir() {
-    return StandardHBaseFsLayout.get().getRegionDir(tableDir, 
regionInfoForFs.getEncodedName());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
deleted file mode 100644
index fbca254..0000000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-public class HierarchicalHRegionFileSystemFactory extends 
HRegionFileSystemFactory {
-  @Override
-  public HRegionFileSystem create(Configuration conf, FileSystem fs, Path 
tableDir,
-      HRegionInfo regionInfo) {
-    return new HierarchicalHRegionFileSystem(conf, fs, tableDir, regionInfo);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
new file mode 100644
index 0000000..58f1927
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionDoesNotExistException.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionException;
+
+@SuppressWarnings("serial")
[email protected]
[email protected]
+public class RegionDoesNotExistException extends RegionException {
+  /**
+   * @param msg full description of the failure
+   */
+  public RegionDoesNotExistException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * @param hri expected region to find
+   */
+  public RegionDoesNotExistException(HRegionInfo hri) {
+    super("Region '" + hri + "' doesn't exist on the filesystem");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
index 2e6b821..fcc0eec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import 
org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -197,7 +198,7 @@ public class RegionMergeTransactionImpl implements 
RegionMergeTransaction {
           region_a.getRegionInfo().getRegionName());
       if (regionAHasMergeQualifier ||
           hasMergeQualifierInMeta(services, 
region_b.getRegionInfo().getRegionName())) {
-        LOG.debug("Region " + (regionAHasMergeQualifier ? 
+        LOG.debug("Region " + (regionAHasMergeQualifier ?
             region_a.getRegionInfo().getRegionNameAsString()
                 : region_b.getRegionInfo().getRegionNameAsString())
             + " is not mergeable because it has merge qualifier in META");

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index a7f8495..6ecb4e9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
@@ -372,7 +373,7 @@ public class SplitTransactionImpl implements 
SplitTransaction {
 
     return new PairOfSameType<Region>(a, b);
   }
-  
+
   @VisibleForTesting
   void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, 
HRegionInfo daughter)
       throws IOException {
@@ -386,7 +387,7 @@ public class SplitTransactionImpl implements 
SplitTransaction {
     this.parent.getRegionFileSystem().assertReferenceFileCountOfDaughterDir(
       expectedReferenceFileCount, daughter);
   }
-  
+
   /**
    * Perform time consuming opening of the daughter regions.
    * @param server Hosting server instance.  Can be null when testing

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index c5ef7fd..69bf1ec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -259,19 +260,45 @@ public class StoreFileInfo {
     return reader;
   }
 
+  private interface ComputeFileInfo<T> {
+    T compute(FileSystem fs, FileStatus status, long offset, long length) throws IOException;
+  }
+
   /**
    * Compute the HDFS Block Distribution for this StoreFile
    */
   public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem 
fs)
       throws IOException {
+    return computeFileInfo(fs, new ComputeFileInfo<HDFSBlocksDistribution>() {
+      @Override
+      public HDFSBlocksDistribution compute(FileSystem fs, FileStatus status, long offset, long length) throws IOException {
+        return FSUtils.computeHDFSBlocksDistribution(fs, status, offset, 
length);
+      }
+    });
+  }
 
+  public BlockLocation[] getFileBlockLocations(final FileSystem fs)
+      throws IOException {
+    return computeFileInfo(fs, new ComputeFileInfo<BlockLocation[]>() {
+      @Override
+      public BlockLocation[] compute(FileSystem fs, FileStatus status, long offset, long length) throws IOException {
+        return fs.getFileBlockLocations(status, offset, length);
+      }
+    });
+  }
+
+  /**
+   * Compute file information for this StoreFile using the supplied strategy,
+   * retrying across link locations when the file is backed by an HFileLink.
+   */
+  private <T> T computeFileInfo(final FileSystem fs,
+      final ComputeFileInfo<T> computeObj) throws IOException {
     // guard against the case where we get the FileStatus from link, but by 
the time we
     // call compute the file is moved again
     if (this.link != null) {
       FileNotFoundException exToThrow = null;
       for (int i = 0; i < this.link.getLocations().length; i++) {
         try {
-          return computeHDFSBlocksDistributionInternal(fs);
+          return computeFileInfoInternal(fs, computeObj);
         } catch (FileNotFoundException ex) {
           // try the other location
           exToThrow = ex;
@@ -279,18 +306,49 @@ public class StoreFileInfo {
       }
       throw exToThrow;
     } else {
-      return computeHDFSBlocksDistributionInternal(fs);
+      return computeFileInfoInternal(fs, computeObj);
     }
   }
 
-  private HDFSBlocksDistribution computeHDFSBlocksDistributionInternal(final 
FileSystem fs)
+  private <T> T computeFileInfoInternal(final FileSystem fs, final 
ComputeFileInfo<T> computeObj)
       throws IOException {
     FileStatus status = getReferencedFileStatus(fs);
     if (this.reference != null) {
-      return computeRefFileHDFSBlockDistribution(fs, reference, status);
+      return computeRefFileInfo(fs, reference, status, computeObj);
+    } else {
+      return computeObj.compute(fs, status, 0, status.getLen());
+    }
+  }
+
+  /**
+   * Helper function to compute HDFS block information of a given reference
+   * file. For a reference file, we don't compute the exact value; we use an
+   * estimate instead, given it might be good enough. We assume the bottom part
+   * takes the first half of the reference file and the top part takes the
+   * second half. This is just an estimate, given that the midkey of a region
+   * != the midkey of an HFile, and the number and size of keys vary.
+   * If this estimate isn't good enough, we can improve it later.
+   * @param fs  The FileSystem
+   * @param reference  The reference
+   * @param status  The reference FileStatus
+   * @return the computed file information for the referenced half of the file
+   */
+  private static <T> T computeRefFileInfo(final FileSystem fs, final Reference 
reference,
+      final FileStatus status, final ComputeFileInfo<T> computeObj) throws 
IOException {
+    if (status == null) {
+      return null;
+    }
+
+    long start = 0;
+    long length = 0;
+    if (Reference.isTopFileRegion(reference.getFileRegion())) {
+      start = status.getLen()/2;
+      length = status.getLen() - status.getLen()/2;
     } else {
-      return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, 
status.getLen());
+      start = 0;
+      length = status.getLen()/2;
     }
+    return computeObj.compute(fs, status, start, length);
   }
 
   /**
@@ -388,13 +446,13 @@ public class StoreFileInfo {
     Matcher m = REF_NAME_PATTERN.matcher(name);
     return m.matches() && m.groupCount() > 1;
   }
-  
+
   /*
    * Return path to the file referred to by a Reference.  Presumes a directory
    * hierarchy of 
<code>${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname</code>.
-   * Unless the table is a humongous table in which case the hierarchy is 
+   * Unless the table is a humongous table in which case the hierarchy is
    * 
<code>${hbase.rootdir}/data/${namespace}/tablename/bucket/regionname/familyname</code>.
-   * 
+   *
    * @param p Path to a Reference file.
    * @return Calculated path to parent region file.
    * @throws IllegalArgumentException when path regex fails to match.
@@ -406,7 +464,7 @@ public class StoreFileInfo {
       throw new IllegalArgumentException("Failed match of store file name " +
           p.toString());
     }
-  
+
     // Other region name is suffix on the passed Reference file name
     String otherRegion = m.group(2);
     // Tabledir is up two directories from where Reference was written.
@@ -417,7 +475,7 @@ public class StoreFileInfo {
       LOG.debug("reference '" + p + "' to region=" + otherRegion
         + " hfile=" + nameStrippedOfSuffix);
     }
-  
+
     return new Path(new Path(FsLayout.getRegionDir(tableDir, otherRegion), 
p.getParent()
           .getName()), nameStrippedOfSuffix);
   }
@@ -456,39 +514,6 @@ public class StoreFileInfo {
     return validateStoreFileName(p.getName());
   }
 
-  /**
-   * helper function to compute HDFS blocks distribution of a given reference
-   * file.For reference file, we don't compute the exact value. We use some
-   * estimate instead given it might be good enough. we assume bottom part
-   * takes the first half of reference file, top part takes the second half
-   * of the reference file. This is just estimate, given
-   * midkey ofregion != midkey of HFile, also the number and size of keys vary.
-   * If this estimate isn't good enough, we can improve it later.
-   * @param fs  The FileSystem
-   * @param reference  The reference
-   * @param status  The reference FileStatus
-   * @return HDFS blocks distribution
-   */
-  private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
-      final FileSystem fs, final Reference reference, final FileStatus status)
-      throws IOException {
-    if (status == null) {
-      return null;
-    }
-
-    long start = 0;
-    long length = 0;
-
-    if (Reference.isTopFileRegion(reference.getFileRegion())) {
-      start = status.getLen()/2;
-      length = status.getLen() - status.getLen()/2;
-    } else {
-      start = 0;
-      length = status.getLen()/2;
-    }
-    return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
-  }
-
   @Override
   public boolean equals(Object that) {
     if (this == that) return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index e4b29ee..1e40ca1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -592,7 +592,7 @@ public class RestoreSnapshotHelper {
     Path referenceFile = new Path(new Path(FsLayout.getRegionDir(new Path(
       snapshotTable.getNameAsString()), regionInfo), familyDir.getName()), 
hfileName);
     Path referredToFile = StoreFileInfo.getReferredToFile(referenceFile);
-    
+
     String snapshotRegionName = 
referredToFile.getParent().getParent().getName();
     String fileName = referredToFile.getName();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 85f08af..406f203 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -40,12 +40,12 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -200,8 +200,7 @@ public class SnapshotManifest {
     RegionVisitor visitor = createRegionVisitor(desc);
 
     // Open the RegionFS
-    HRegionFileSystem regionFs = 
HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-          tableDir, regionInfo, true);
+    HRegionFileSystem regionFs = HRegionFileSystem.open(conf, regionInfo, 
true);
     monitor.rethrowException();
 
     // 1. dump region meta info into the snapshot directory

http://git-wip-us.apache.org/repos/asf/hbase/blob/e7743d77/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index ad7c93a..6337174 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.fs.HRegionFileSystem;
 import org.apache.hadoop.hbase.HRegionInfo;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,8 +76,7 @@ public class SnapshotManifestV1 {
     }
 
     public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws 
IOException {
-      HRegionFileSystem snapshotRegionFs = 
HRegionFileSystem.createRegionOnFileSystem(conf,
-        fs, snapshotDir, regionInfo);
+      HRegionFileSystem snapshotRegionFs = 
HRegionFileSystem.createSnapshot(conf, regionInfo);
       return snapshotRegionFs;
     }
 
@@ -157,8 +156,7 @@ public class SnapshotManifestV1 {
 
   static SnapshotRegionManifest buildManifestFromDisk (final Configuration 
conf,
       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) 
throws IOException {
-    HRegionFileSystem regionFs = 
HRegionFileSystem.openRegionFromFileSystem(conf, fs,
-          tableDir, regionInfo, true);
+    HRegionFileSystem regionFs = HRegionFileSystem.open(conf, regionInfo, 
true);
     SnapshotRegionManifest.Builder manifest = 
SnapshotRegionManifest.newBuilder();
 
     // 1. dump region meta info into the snapshot directory

Reply via email to