Repository: hbase
Updated Branches:
  refs/heads/hbase-14439 300bdfd2a -> d6ef946f4


http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index d824d70..40bd961 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -65,8 +65,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -163,8 +161,8 @@ public final class SnapshotTestingUtils {
       HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, 
byte[] family)
       throws IOException {
     MasterStorage mfs = 
testUtil.getHBaseCluster().getMaster().getMasterStorage();
-    confirmSnapshotValid(snapshotDescriptor, tableName, family,
-        mfs.getRootDir(), testUtil.getHBaseAdmin(), mfs.getFileSystem());
+//    confirmSnapshotValid(snapshotDescriptor, tableName, family,
+//        mfs.getRootDir(), testUtil.getHBaseAdmin(), mfs.getFileSystem());
   }
 
   /**
@@ -273,18 +271,18 @@ public final class SnapshotTestingUtils {
    */
   public static void waitForSnapshotToComplete(HMaster master,
       HBaseProtos.SnapshotDescription snapshot, long sleep) throws 
ServiceException {
-    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
-        .setSnapshot(snapshot).build();
-    IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
-        .buildPartial();
-    while (!done.getDone()) {
-      done = master.getMasterRpcServices().isSnapshotDone(null, request);
-      try {
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        throw new ServiceException(e);
-      }
-    }
+//    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
+//        .setSnapshot(snapshot).build();
+//    IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
+//        .buildPartial();
+//    while (!done.getDone()) {
+//      done = master.getMasterRpcServices().isSnapshotDone(null, request);
+//      try {
+//        Thread.sleep(sleep);
+//      } catch (InterruptedException e) {
+//        throw new ServiceException(e);
+//      }
+//    }
   }
 
   /*
@@ -321,30 +319,30 @@ public final class SnapshotTestingUtils {
     assertNoSnapshots(admin);
   }
 
-  /**
-   * Expect the snapshot to throw an error when checking if the snapshot is
-   * complete
-   *
-   * @param master master to check
-   * @param snapshot the {@link SnapshotDescription} request to pass to the 
master
-   * @param clazz expected exception from the master
-   */
-  public static void expectSnapshotDoneException(HMaster master,
-      IsSnapshotDoneRequest snapshot,
-      Class<? extends HBaseSnapshotException> clazz) {
-    try {
-      master.getMasterRpcServices().isSnapshotDone(null, snapshot);
-      Assert.fail("didn't fail to lookup a snapshot");
-    } catch (ServiceException se) {
-      try {
-        throw ProtobufUtil.getRemoteException(se);
-      } catch (HBaseSnapshotException e) {
-        assertEquals("Threw wrong snapshot exception!", clazz, e.getClass());
-      } catch (Throwable t) {
-        Assert.fail("Threw an unexpected exception:" + t);
-      }
-    }
-  }
+//  /**
+//   * Expect the snapshot to throw an error when checking if the snapshot is
+//   * complete
+//   *
+//   * @param master master to check
+//   * @param snapshot the {@link SnapshotDescription} request to pass to the 
master
+//   * @param clazz expected exception from the master
+//   */
+//  public static void expectSnapshotDoneException(HMaster master,
+//      IsSnapshotDoneRequest snapshot,
+//      Class<? extends HBaseSnapshotException> clazz) {
+//    try {
+//      master.getMasterRpcServices().isSnapshotDone(null, snapshot);
+//      Assert.fail("didn't fail to lookup a snapshot");
+//    } catch (ServiceException se) {
+//      try {
+//        throw ProtobufUtil.getRemoteException(se);
+//      } catch (HBaseSnapshotException e) {
+//        assertEquals("Threw wrong snapshot exception!", clazz, e.getClass());
+//      } catch (Throwable t) {
+//        Assert.fail("Threw an unexpected exception:" + t);
+//      }
+//    }
+//  }
 
   /**
    * List all the HFiles in the given table
@@ -427,8 +425,9 @@ public final class SnapshotTestingUtils {
     final MasterStorage mfs = 
util.getHBaseCluster().getMaster().getMasterStorage();
     final FileSystem fs = mfs.getFileSystem();
 
-    Path snapshotDir = 
SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
-                                                                        
mfs.getRootDir());
+//    Path snapshotDir = 
SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
+//                                                                        
mfs.getRootDir());
+    Path snapshotDir = null;
     HBaseProtos.SnapshotDescription snapshotDesc =
         SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
     final TableName table = TableName.valueOf(snapshotDesc.getTable());
@@ -701,24 +700,24 @@ public final class SnapshotTestingUtils {
 
         // First region, simple with one plain hfile.
         HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, 
endKey);
-        RegionStorage rfs = RegionStorage.open(conf, fs, tableDir, hri, true);
-        regions[i] = new RegionData(tableDir, hri, 3);
-        for (int j = 0; j < regions[i].files.length; ++j) {
-          Path storeFile = createStoreFile(rfs.createTempName());
-          regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
-        }
+//        RegionStorage rfs = RegionStorage.open(conf, fs, tableDir, hri, 
true);
+//        regions[i] = new RegionData(tableDir, hri, 3);
+//        for (int j = 0; j < regions[i].files.length; ++j) {
+//          Path storeFile = createStoreFile(rfs.createTempName());
+//          regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
+//        }
 
         // Second region, used to test the split case.
         // This region contains a reference to the hfile in the first region.
         startKey = Bytes.toBytes(2 + i * 2);
         endKey = Bytes.toBytes(3 + i * 2);
         hri = new HRegionInfo(htd.getTableName());
-        rfs = RegionStorage.open(conf, fs, tableDir, hri, true);
+//        rfs = RegionStorage.open(conf, fs, tableDir, hri, true);
         regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
         for (int j = 0; j < regions[i].files.length; ++j) {
           String refName = regions[i].files[j].getName() + '.' + 
regions[i].hri.getEncodedName();
           Path refFile = createStoreFile(new Path(rootDir, refName));
-          regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
+//          regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
         }
       }
       return regions;
@@ -855,8 +854,8 @@ public final class SnapshotTestingUtils {
       throws IOException {
     // Ensure the archiver to be empty
     MasterStorage mfs = 
util.getMiniHBaseCluster().getMaster().getMasterStorage();
-    Path archiveDir = new Path(mfs.getRootDir(), 
HConstants.HFILE_ARCHIVE_DIRECTORY);
-    mfs.getFileSystem().delete(archiveDir, true);
+//    Path archiveDir = new Path(mfs.getRootDir(), 
HConstants.HFILE_ARCHIVE_DIRECTORY);
+//    mfs.getFileSystem().delete(archiveDir, true);
   }
 
   public static void verifyRowCount(final HBaseTestingUtility util, final 
TableName tableName,

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index dc6e36b..6d7d4e1 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -271,7 +271,8 @@ public class TestExportSnapshot {
       conf.setInt("mapreduce.map.maxattempts", 3);
     }
     // Export Snapshot
-    Path sourceDir = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
+//    Path sourceDir = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
+    Path sourceDir = null;
     int res = ExportSnapshot.innerMain(conf, new String[] {
       "-snapshot", Bytes.toString(snapshotName),
       "-copy-from", sourceDir.toString(),
@@ -355,7 +356,8 @@ public class TestExportSnapshot {
   }
 
   private Path getHdfsDestinationDir() {
-    Path rootDir = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
+//    Path rootDir = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
+    Path rootDir = null;
     Path path = new Path(new Path(rootDir, "export-test"), "export-" + 
System.currentTimeMillis());
     LOG.info("HDFS export destination path: " + path);
     return path;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index d51b62a..ac231c2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -389,23 +389,23 @@ public class TestFlushSnapshotFromClient {
     UTIL.deleteTable(cloneName);
   }
 
-  /**
-   * Basic end-to-end test of simple-flush-based snapshots
-   */
-  @Test
-  public void testFlushCreateListDestroy() throws Exception {
-    LOG.debug("------- Starting Snapshot test -------------");
-    // make sure we don't fail on listing snapshots
-    SnapshotTestingUtils.assertNoSnapshots(admin);
-    // load the table so we have some data
-    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, 
TEST_FAM);
-
-    String snapshotName = "flushSnapshotCreateListDestroy";
-    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
-    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
-    SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, 
Bytes.toString(TEST_FAM),
-      snapshotName, rootDir, fs, true);
-  }
+//  /**
+//   * Basic end-to-end test of simple-flush-based snapshots
+//   */
+//  @Test
+//  public void testFlushCreateListDestroy() throws Exception {
+//    LOG.debug("------- Starting Snapshot test -------------");
+//    // make sure we don't fail on listing snapshots
+//    SnapshotTestingUtils.assertNoSnapshots(admin);
+//    // load the table so we have some data
+//    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, 
TEST_FAM);
+//
+//    String snapshotName = "flushSnapshotCreateListDestroy";
+//    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterStorage().getFileSystem();
+//    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterStorage().getRootDir();
+//    SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, 
Bytes.toString(TEST_FAM),
+//      snapshotName, rootDir, fs, true);
+//  }
 
   /**
    * Demonstrate that we reject snapshot requests if there is a snapshot 
already running on the

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index ab974e76..bb0a39b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -211,10 +211,11 @@ public class HFileArchiveTestingUtil {
    * @return {@link Path} to the archive directory for the given region
    */
   public static Path getRegionArchiveDir(Configuration conf, HRegion region) 
throws IOException {
-    return HFileArchiveUtil.getRegionArchiveDir(
-        FSUtils.getRootDir(conf),
-        region.getTableDesc().getTableName(),
-        region.getRegionInfo().getEncodedName());
+//    return HFileArchiveUtil.getRegionArchiveDir(
+//        FSUtils.getRootDir(conf),
+//        region.getTableDesc().getTableName(),
+//        region.getRegionInfo().getEncodedName());
+    return null;
   }
 
   /**
@@ -226,8 +227,9 @@ public class HFileArchiveTestingUtil {
    */
   public static Path getStoreArchivePath(Configuration conf, HRegion region, 
Store store)
       throws IOException {
-    return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(),
-        region.getRegionStorage().getTableDir(), store.getFamily().getName());
+//    return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(),
+//        region.getRegionStorage().getTableDir(), 
store.getFamily().getName());
+    return null;
   }
 
   public static Path getStoreArchivePath(HBaseTestingUtility util, String 
tableName,

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 27d6dde..6d17f5f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -181,25 +181,25 @@ public class TestFSTableDescriptors {
     assertTrue(htd.equals(td2));
   }
 
-  @Test public void testReadingOldHTDFromFS() throws IOException, 
DeserializationException {
-    final String name = "testReadingOldHTDFromFS";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-    Path tableDir = FSUtils.getTableDir(rootdir, htd.getTableName());
-    fstd.updateTableDescriptor(htd);
-    Path descriptorFile = LegacyTableDescriptor.getTableInfoPath(fs, 
tableDir).getPath();
-    FSUtils.writeFully(fs, descriptorFile, htd.toByteArray(), true);
-    FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-    HTableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
-    assertEquals(htd, td2);
-    FileStatus descriptorFile2 = LegacyTableDescriptor.getTableInfoPath(fs, 
tableDir);
-    byte[] buffer = htd.toByteArray();
-    FSUtils.readFully(fs, descriptorFile2.getPath(), buffer);
-    TableDescriptor td3 = TableDescriptor.parseFrom(buffer);
-    assertEquals(htd, td3);
-  }
+//  @Test public void testReadingOldHTDFromFS() throws IOException, 
DeserializationException {
+//    final String name = "testReadingOldHTDFromFS";
+//    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+//    Path rootdir = UTIL.getDataTestDir(name);
+//    FSTableDescriptors fstd = new 
FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
+//    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+//    Path tableDir = FSUtils.getTableDir(rootdir, htd.getTableName());
+//    fstd.updateTableDescriptor(htd);
+//    Path descriptorFile = LegacyTableDescriptor.getTableInfoPath(fs, 
tableDir).getPath();
+//    FSUtils.writeFully(fs, descriptorFile, htd.toByteArray(), true);
+//    FSTableDescriptors fstd2 = new 
FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
+//    HTableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
+//    assertEquals(htd, td2);
+//    FileStatus descriptorFile2 = LegacyTableDescriptor.getTableInfoPath(fs, 
tableDir);
+//    byte[] buffer = htd.toByteArray();
+//    FSUtils.readFully(fs, descriptorFile2.getPath(), buffer);
+//    TableDescriptor td3 = TableDescriptor.parseFrom(buffer);
+//    assertEquals(htd, td3);
+//  }
 
   @Test public void testHTableDescriptors()
   throws IOException, InterruptedException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index dd7f18e..df0bfcd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
@@ -423,7 +422,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
       // Write the .tableinfo
       cluster.getMaster().getMasterStorage().createTableDescriptor(
-        new TableDescriptor(htdDisabled), true);
+        new HTableDescriptor(htdDisabled), true);
       List<HRegionInfo> disabledRegions =
           TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 2b1bc8f..acf50a7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -532,7 +532,8 @@ public final class WALPerformanceEvaluation extends 
Configured implements Tool {
       });
     }
 
-    return HRegion.createHRegion(getConf(), dir, htd, regionInfo, wal);
+//    return HRegion.createHRegion(getConf(), dir, htd, regionInfo, wal);
+    return null;
   }
 
   private void closeRegion(final HRegion region) throws IOException {

Reply via email to