http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
index bc8a41b,0000000..612b98a
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
@@@ -1,376 -1,0 +1,376 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.hbase.client;
 +
 +import java.util.List;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HColumnDescriptor;
 +import org.apache.hadoop.hbase.HConstants;
 +import org.apache.hadoop.hbase.HRegionInfo;
 +import org.apache.hadoop.hbase.HTableDescriptor;
- import org.apache.hadoop.hbase.LargeTests;
 +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 +import org.apache.hadoop.hbase.mob.MobConstants;
 +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 +import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
++import org.apache.hadoop.hbase.testclassification.LargeTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.junit.After;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +/**
 + * Test to verify that the cloned table is independent of the table from 
which it was cloned
 + */
 +@Category(LargeTests.class)
 +public class TestMobSnapshotCloneIndependence {
 +  private static final Log LOG = 
LogFactory.getLog(TestSnapshotCloneIndependence.class);
 +
 +  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 +
 +  private static final int NUM_RS = 2;
 +  private static final String STRING_TABLE_NAME = "test";
 +  private static final String TEST_FAM_STR = "fam";
 +  private static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
 +  private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
 +
 +  /**
 +   * Setup the config for the cluster and start it
 +   * @throws Exception on failure
 +   */
 +  @BeforeClass
 +  public static void setupCluster() throws Exception {
 +    setupConf(UTIL.getConfiguration());
 +    UTIL.startMiniCluster(NUM_RS);
 +  }
 +
 +  private static void setupConf(Configuration conf) {
 +    // enable snapshot support
 +    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
 +    // disable the ui
 +    conf.setInt("hbase.regionsever.info.port", -1);
 +    // change the flush size to a small amount, regulating number of store 
files
 +    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
 +    // so make sure we get a compaction when doing a load, but keep around
 +    // some files in the store
 +    conf.setInt("hbase.hstore.compaction.min", 10);
 +    conf.setInt("hbase.hstore.compactionThreshold", 10);
 +    // block writes if we get to 12 store files
 +    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
 +    conf.setInt("hbase.regionserver.msginterval", 100);
 +    conf.setBoolean("hbase.master.enabletable.roundrobin", true);
 +    // Avoid potentially aggressive splitting which would cause snapshot to 
fail
 +    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
 +      ConstantSizeRegionSplitPolicy.class.getName());
 +    conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
 +  }
 +
 +  @Before
 +  public void setup() throws Exception {
 +    MobSnapshotTestingUtils.createMobTable(UTIL, 
TableName.valueOf(STRING_TABLE_NAME), TEST_FAM);
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +    UTIL.deleteTable(TABLE_NAME);
 +    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
 +    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
 +  }
 +
 +  @AfterClass
 +  public static void cleanupTest() throws Exception {
 +    try {
 +      UTIL.shutdownMiniCluster();
 +    } catch (Exception e) {
 +      LOG.warn("failure shutting down cluster", e);
 +    }
 +  }
 +
 +  /**
 +   * Verify that adding data to the cloned table will not affect the 
original, and vice-versa when
 +   * it is taken as an online snapshot.
 +   */
 +  @Test (timeout=300000)
 +  public void testOnlineSnapshotAppendIndependent() throws Exception {
 +    runTestSnapshotAppendIndependent(true);
 +  }
 +
 +  /**
 +   * Verify that adding data to the cloned table will not affect the 
original, and vice-versa when
 +   * it is taken as an offline snapshot.
 +   */
 +  @Test (timeout=300000)
 +  public void testOfflineSnapshotAppendIndependent() throws Exception {
 +    runTestSnapshotAppendIndependent(false);
 +  }
 +
 +  /**
 +   * Verify that adding metadata to the cloned table will not affect the 
original, and vice-versa
 +   * when it is taken as an online snapshot.
 +   */
 +  @Test (timeout=300000)
 +  public void testOnlineSnapshotMetadataChangesIndependent() throws Exception 
{
 +    runTestSnapshotMetadataChangesIndependent(true);
 +  }
 +
 +  /**
 +   * Verify that adding metadata to the cloned table will not affect the 
original, and vice-versa
 +   * when it is taken as an online snapshot.
 +   */
 +  @Test (timeout=300000)
 +  public void testOfflineSnapshotMetadataChangesIndependent() throws 
Exception {
 +    runTestSnapshotMetadataChangesIndependent(false);
 +  }
 +
 +  /**
 +   * Verify that region operations, in this case splitting a region, are 
independent between the
 +   * cloned table and the original.
 +   */
 +  @Test (timeout=300000)
 +  public void testOfflineSnapshotRegionOperationsIndependent() throws 
Exception {
 +    runTestRegionOperationsIndependent(false);
 +  }
 +
 +  /**
 +   * Verify that region operations, in this case splitting a region, are 
independent between the
 +   * cloned table and the original.
 +   */
 +  @Test (timeout=300000)
 +  public void testOnlineSnapshotRegionOperationsIndependent() throws 
Exception {
 +    runTestRegionOperationsIndependent(true);
 +  }
 +
 +  private static void waitOnSplit(final HTable t, int originalCount) throws 
Exception {
 +    for (int i = 0; i < 200; i++) {
 +      try {
 +        Thread.sleep(50);
 +      } catch (InterruptedException e) {
 +        // Restore the interrupted status
 +        Thread.currentThread().interrupt();
 +      }
 +      if (t.getRegionLocations().size() > originalCount) {
 +        return;
 +      }
 +    }
 +    throw new Exception("Split did not increase the number of regions");
 +  }
 +
 +  /*
 +   * Take a snapshot of a table, add data, and verify that this only
 +   * affects one table
 +   * @param online - Whether the table is online or not during the snapshot
 +   */
 +  private void runTestSnapshotAppendIndependent(boolean online) throws 
Exception {
 +    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
 +    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
 +
 +    Admin admin = UTIL.getHBaseAdmin();
 +    final long startTime = System.currentTimeMillis();
 +    final TableName localTableName =
 +        TableName.valueOf(STRING_TABLE_NAME + startTime);
 +
 +    HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, 
localTableName, TEST_FAM);
 +    try {
 +
-       SnapshotTestingUtils.loadData(UTIL, original, 500, TEST_FAM);
++      SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
 +      final int origTableRowCount = 
MobSnapshotTestingUtils.countMobRows(original);
 +
 +      // Take a snapshot
 +      final String snapshotNameAsString = "snapshot_" + localTableName;
 +      byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
 +
 +      SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, 
TEST_FAM_STR,
 +        snapshotNameAsString, rootDir, fs, online);
 +
 +      if (!online) {
 +        admin.enableTable(localTableName);
 +      }
 +      TableName cloneTableName = TableName.valueOf("test-clone-" + 
localTableName);
 +      admin.cloneSnapshot(snapshotName, cloneTableName);
 +
 +      HTable clonedTable = new HTable(UTIL.getConfiguration(), 
cloneTableName);
 +
 +      try {
 +        final int clonedTableRowCount = 
MobSnapshotTestingUtils.countMobRows(clonedTable);
 +
 +        Assert.assertEquals(
 +          "The line counts of original and cloned tables do not match after 
clone. ",
 +          origTableRowCount, clonedTableRowCount);
 +
 +        // Attempt to add data to the test
 +        final String rowKey = "new-row-" + System.currentTimeMillis();
 +
 +        Put p = new Put(Bytes.toBytes(rowKey));
 +        p.add(TEST_FAM, Bytes.toBytes("someQualifier"), 
Bytes.toBytes("someString"));
 +        original.put(p);
 +        original.flushCommits();
 +
 +        // Verify that the put is present in the original table but not in the clone
 +        Assert.assertEquals("The row count of the original table was not 
modified by the put",
 +          origTableRowCount + 1, 
MobSnapshotTestingUtils.countMobRows(original));
 +        Assert.assertEquals(
 +          "The row count of the cloned table changed as a result of addition 
to the original",
 +          clonedTableRowCount, 
MobSnapshotTestingUtils.countMobRows(clonedTable));
 +
 +        p = new Put(Bytes.toBytes(rowKey));
 +        p.add(TEST_FAM, Bytes.toBytes("someQualifier"), 
Bytes.toBytes("someString"));
 +        clonedTable.put(p);
 +        clonedTable.flushCommits();
 +
 +        // Verify that the put to the clone did not affect the original 
table's row count
 +        Assert.assertEquals(
 +          "The row count of the original table was modified by the put to the 
clone",
 +          origTableRowCount + 1, 
MobSnapshotTestingUtils.countMobRows(original));
 +        Assert.assertEquals("The row count of the cloned table was not 
modified by the put",
 +          clonedTableRowCount + 1, 
MobSnapshotTestingUtils.countMobRows(clonedTable));
 +      } finally {
 +
 +        clonedTable.close();
 +      }
 +    } finally {
 +
 +      original.close();
 +    }
 +  }
 +
 +  /*
 +   * Take a snapshot of a table, do a split, and verify that this only 
affects one table
 +   * @param online - Whether the table is online or not during the snapshot
 +   */
 +  private void runTestRegionOperationsIndependent(boolean online) throws 
Exception {
 +    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
 +    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
 +
 +    // Create a table
 +    Admin admin = UTIL.getHBaseAdmin();
 +    final long startTime = System.currentTimeMillis();
 +    final TableName localTableName =
 +        TableName.valueOf(STRING_TABLE_NAME + startTime);
 +    HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, 
localTableName, TEST_FAM);
-     SnapshotTestingUtils.loadData(UTIL, original, 500, TEST_FAM);
++    SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
 +    final int loadedTableCount = 
MobSnapshotTestingUtils.countMobRows(original);
 +    System.out.println("Original table has: " + loadedTableCount + " rows");
 +
 +    final String snapshotNameAsString = "snapshot_" + localTableName;
 +
 +    // Create a snapshot
 +    SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, 
TEST_FAM_STR,
 +      snapshotNameAsString, rootDir, fs, online);
 +
 +    if (!online) {
 +      admin.enableTable(localTableName);
 +    }
 +
 +    TableName cloneTableName = TableName.valueOf("test-clone-" + 
localTableName);
 +
 +    // Clone the snapshot
 +    byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
 +    admin.cloneSnapshot(snapshotName, cloneTableName);
 +
 +    // Verify that region information is the same pre-split
 +    original.clearRegionCache();
 +    List<HRegionInfo> originalTableHRegions = 
admin.getTableRegions(localTableName);
 +
 +    final int originalRegionCount = originalTableHRegions.size();
 +    final int cloneTableRegionCount = 
admin.getTableRegions(cloneTableName).size();
 +    Assert.assertEquals(
 +      "The number of regions in the cloned table is different than in the 
original table.",
 +      originalRegionCount, cloneTableRegionCount);
 +
 +    // Split a region on the parent table
 +    admin.splitRegion(originalTableHRegions.get(0).getRegionName());
 +    waitOnSplit(original, originalRegionCount);
 +
 +    // Verify that the cloned table region is not split
 +    final int cloneTableRegionCount2 = 
admin.getTableRegions(cloneTableName).size();
 +    Assert.assertEquals(
 +      "The number of regions in the cloned table changed though none of its 
regions were split.",
 +      cloneTableRegionCount, cloneTableRegionCount2);
 +  }
 +
 +  /*
 +   * Take a snapshot of a table, add metadata, and verify that this only
 +   * affects one table
 +   * @param online - Whether the table is online or not during the snapshot
 +   */
 +  private void runTestSnapshotMetadataChangesIndependent(boolean online) 
throws Exception {
 +    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
 +    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
 +
 +    // Create a table
 +    Admin admin = UTIL.getHBaseAdmin();
 +    final long startTime = System.currentTimeMillis();
 +    final TableName localTableName =
 +        TableName.valueOf(STRING_TABLE_NAME + startTime);
 +    HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, 
localTableName, TEST_FAM);
-     SnapshotTestingUtils.loadData(UTIL, original, 500, TEST_FAM);
++    SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
 +
 +    final String snapshotNameAsString = "snapshot_" + localTableName;
 +
 +    // Create a snapshot
 +    SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, 
TEST_FAM_STR,
 +      snapshotNameAsString, rootDir, fs, online);
 +
 +    if (!online) {
 +      admin.enableTable(localTableName);
 +    }
 +    TableName cloneTableName = TableName.valueOf("test-clone-" + 
localTableName);
 +
 +    // Clone the snapshot
 +    byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
 +    admin.cloneSnapshot(snapshotName, cloneTableName);
 +
 +    // Add a new column family to the original table
 +    byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
 +    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2);
 +
 +    admin.disableTable(localTableName);
 +    admin.addColumn(localTableName, hcd);
 +
 +    // Verify that it is not in the snapshot
 +    admin.enableTable(localTableName);
 +
 +    // get a description of the cloned table
 +    // get a list of its families
 +    // assert that the family is there
 +    HTableDescriptor originalTableDescriptor = original.getTableDescriptor();
 +    HTableDescriptor clonedTableDescriptor = 
admin.getTableDescriptor(cloneTableName);
 +
 +    Assert.assertTrue("The original family was not found. There is something 
wrong. ",
 +      originalTableDescriptor.hasFamily(TEST_FAM));
 +    Assert.assertTrue("The original family was not found in the clone. There 
is something wrong. ",
 +      clonedTableDescriptor.hasFamily(TEST_FAM));
 +
 +    Assert.assertTrue("The new family was not found. ",
 +      originalTableDescriptor.hasFamily(TEST_FAM_2));
 +    Assert.assertTrue("The new family was not found. ",
 +      !clonedTableDescriptor.hasFamily(TEST_FAM_2));
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
index 947354f,0000000..5bf5a30
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
@@@ -1,304 -1,0 +1,305 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.client;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.fail;
 +
 +import java.util.List;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HConstants;
- import org.apache.hadoop.hbase.LargeTests;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.TableNotFoundException;
 +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 +import org.apache.hadoop.hbase.mob.MobConstants;
 +import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 +import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 +import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 +import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
 +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
++import org.apache.hadoop.hbase.testclassification.ClientTests;
++import org.apache.hadoop.hbase.testclassification.LargeTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.apache.hadoop.hbase.util.FSUtils;
 +import org.junit.After;
 +import org.junit.AfterClass;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +import com.google.common.collect.Lists;
 +
 +/**
 + * Test create/using/deleting snapshots from the client
 + * <p>
 + * This is an end-to-end test for the snapshot utility
 + */
- @Category(LargeTests.class)
++@Category({LargeTests.class, ClientTests.class})
 +public class TestMobSnapshotFromClient {
 +  private static final Log LOG = 
LogFactory.getLog(TestSnapshotFromClient.class);
 +  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 +  private static final int NUM_RS = 2;
 +  private static final String STRING_TABLE_NAME = "test";
 +  protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
 +  protected static final TableName TABLE_NAME =
 +      TableName.valueOf(STRING_TABLE_NAME);
 +
 +  /**
 +   * Setup the config for the cluster
 +   * @throws Exception on failure
 +   */
 +  @BeforeClass
 +  public static void setupCluster() throws Exception {
 +    setupConf(UTIL.getConfiguration());
 +    UTIL.startMiniCluster(NUM_RS);
 +  }
 +
 +  private static void setupConf(Configuration conf) {
 +    // disable the ui
 +    conf.setInt("hbase.regionsever.info.port", -1);
 +    // change the flush size to a small amount, regulating number of store 
files
 +    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
 +    // so make sure we get a compaction when doing a load, but keep around 
some
 +    // files in the store
 +    conf.setInt("hbase.hstore.compaction.min", 10);
 +    conf.setInt("hbase.hstore.compactionThreshold", 10);
 +    // block writes if we get to 12 store files
 +    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
 +    // Enable snapshot
 +    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
 +    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
 +      ConstantSizeRegionSplitPolicy.class.getName());
 +    conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
 +  }
 +
 +  @Before
 +  public void setup() throws Exception {
 +    MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 
getNumReplicas(), TEST_FAM);
 +  }
 +
 +  protected int getNumReplicas() {
 +    return 1;
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +    UTIL.deleteTable(TABLE_NAME);
 +    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
 +    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
 +  }
 +
 +  @AfterClass
 +  public static void cleanupTest() throws Exception {
 +    try {
 +      UTIL.shutdownMiniCluster();
 +    } catch (Exception e) {
 +      LOG.warn("failure shutting down cluster", e);
 +    }
 +  }
 +
 +  /**
 +   * Test that snapshotting hbase:meta and -ROOT- is not allowed
 +   * @throws Exception
 +   */
 +  @Test (timeout=300000)
 +  public void testMetaTablesSnapshot() throws Exception {
 +    Admin admin = UTIL.getHBaseAdmin();
 +    byte[] snapshotName = Bytes.toBytes("metaSnapshot");
 +
 +    try {
 +      admin.snapshot(snapshotName, TableName.META_TABLE_NAME);
 +      fail("taking a snapshot of hbase:meta should not be allowed");
 +    } catch (IllegalArgumentException e) {
 +      // expected
 +    }
 +  }
 +
 +  /**
 +   * Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose 
names match the parameter
 +   *
 +   * @throws Exception
 +   */
 +  @Test (timeout=300000)
 +  public void testSnapshotDeletionWithRegex() throws Exception {
 +    Admin admin = UTIL.getHBaseAdmin();
 +    // make sure we don't fail on listing snapshots
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +
 +    // put some stuff in the table
 +    HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
 +    UTIL.loadTable(table, TEST_FAM);
 +    table.close();
 +
 +    byte[] snapshot1 = Bytes.toBytes("TableSnapshot1");
 +    admin.snapshot(snapshot1, TABLE_NAME);
 +    LOG.debug("Snapshot1 completed.");
 +
 +    byte[] snapshot2 = Bytes.toBytes("TableSnapshot2");
 +    admin.snapshot(snapshot2, TABLE_NAME);
 +    LOG.debug("Snapshot2 completed.");
 +
 +    String snapshot3 = "3rdTableSnapshot";
 +    admin.snapshot(Bytes.toBytes(snapshot3), TABLE_NAME);
 +    LOG.debug(snapshot3 + " completed.");
 +
 +    // delete the first two snapshots
 +    admin.deleteSnapshots("TableSnapshot.*");
 +    List<SnapshotDescription> snapshots = admin.listSnapshots();
 +    assertEquals(1, snapshots.size());
 +    assertEquals(snapshots.get(0).getName(), snapshot3);
 +
 +    admin.deleteSnapshot(snapshot3);
 +    admin.close();
 +  }
 +  /**
 +   * Test snapshotting a table that is offline
 +   * @throws Exception
 +   */
 +  @Test (timeout=300000)
 +  public void testOfflineTableSnapshot() throws Exception {
 +    Admin admin = UTIL.getHBaseAdmin();
 +    // make sure we don't fail on listing snapshots
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +
 +    // put some stuff in the table
 +    HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
 +    UTIL.loadTable(table, TEST_FAM, false);
 +
 +    LOG.debug("FS state before disable:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +    // XXX if this is flaky, might want to consider using the async version 
and looping as
 +    // disableTable can succeed and still timeout.
 +    admin.disableTable(TABLE_NAME);
 +
 +    LOG.debug("FS state before snapshot:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +
 +    // take a snapshot of the disabled table
 +    final String SNAPSHOT_NAME = "offlineTableSnapshot";
 +    byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
 +
 +    SnapshotDescription desc = SnapshotDescription.newBuilder()
 +      .setType(SnapshotDescription.Type.DISABLED)
 +      .setTable(STRING_TABLE_NAME)
 +      .setName(SNAPSHOT_NAME)
 +      .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
 +      .build();
 +    admin.snapshot(desc);
 +    LOG.debug("Snapshot completed.");
 +
 +    // make sure we have the snapshot
 +    List<SnapshotDescription> snapshots = 
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
 +      snapshot, TABLE_NAME);
 +
 +    // make sure its a valid snapshot
 +    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
 +    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
 +    LOG.debug("FS state after snapshot:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +
 +    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, 
TEST_FAM, rootDir,
 +      admin, fs);
 +
 +    admin.deleteSnapshot(snapshot);
 +    snapshots = admin.listSnapshots();
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +  }
 +
 +  @Test (timeout=300000)
 +  public void testSnapshotFailsOnNonExistantTable() throws Exception {
 +    Admin admin = UTIL.getHBaseAdmin();
 +    // make sure we don't fail on listing snapshots
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +    String tableName = "_not_a_table";
 +
 +    // make sure the table doesn't exist
 +    boolean fail = false;
 +    do {
 +    try {
 +      admin.getTableDescriptor(TableName.valueOf(tableName));
 +      fail = true;
 +          LOG.error("Table:" + tableName + " already exists, checking a new 
name");
 +      tableName = tableName+"!";
 +    } catch (TableNotFoundException e) {
 +      fail = false;
 +      }
 +    } while (fail);
 +
 +    // snapshot the non-existant table
 +    try {
 +      admin.snapshot("fail", TableName.valueOf(tableName));
 +      fail("Snapshot succeeded even though there is not table.");
 +    } catch (SnapshotCreationException e) {
 +      LOG.info("Correctly failed to snapshot a non-existant table:" + 
e.getMessage());
 +    }
 +  }
 +
 +  @Test (timeout=300000)
 +  public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
 +    // test with an empty table with one region
 +
 +    Admin admin = UTIL.getHBaseAdmin();
 +    // make sure we don't fail on listing snapshots
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +
 +    LOG.debug("FS state before disable:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +    admin.disableTable(TABLE_NAME);
 +
 +    LOG.debug("FS state before snapshot:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +
 +    // take a snapshot of the disabled table
 +    byte[] snapshot = 
Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegions");
 +    admin.snapshot(snapshot, TABLE_NAME);
 +    LOG.debug("Snapshot completed.");
 +
 +    // make sure we have the snapshot
 +    List<SnapshotDescription> snapshots = 
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
 +      snapshot, TABLE_NAME);
 +
 +    // make sure its a valid snapshot
 +    FileSystem fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
 +    Path rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
 +    LOG.debug("FS state after snapshot:");
 +    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
 +      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
 +
 +    List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the 
region
 +    List<byte[]> nonEmptyCfs = Lists.newArrayList();
 +    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, 
nonEmptyCfs, emptyCfs,
 +      rootDir, admin, fs);
 +
 +    admin.deleteSnapshot(snapshot);
 +    snapshots = admin.listSnapshots();
 +    SnapshotTestingUtils.assertNoSnapshots(admin);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
index b39dd2a,0000000..e0e9541
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
@@@ -1,154 -1,0 +1,154 @@@
 +/**
 + *
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.mob;
 +
 +import junit.framework.TestCase;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.HBaseConfiguration;
 +import org.apache.hadoop.hbase.KeyValue;
 +import org.apache.hadoop.hbase.KeyValue.Type;
 +import org.apache.hadoop.hbase.KeyValueUtil;
- import org.apache.hadoop.hbase.SmallTests;
 +import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 +import org.apache.hadoop.hbase.io.hfile.HFileContext;
 +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 +import org.apache.hadoop.hbase.regionserver.StoreFile;
++import org.apache.hadoop.hbase.testclassification.SmallTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.apache.hadoop.hbase.util.FSUtils;
 +import org.junit.Assert;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(SmallTests.class)
 +public class TestCachedMobFile extends TestCase{
 +  static final Log LOG = LogFactory.getLog(TestCachedMobFile.class);
 +  private Configuration conf = HBaseConfiguration.create();
 +  private CacheConfig cacheConf = new CacheConfig(conf);
 +  private final String TABLE = "tableName";
 +  private final String FAMILY = "familyName";
 +  private final String FAMILY1 = "familyName1";
 +  private final String FAMILY2 = "familyName2";
 +  private final long EXPECTED_REFERENCE_ZERO = 0;
 +  private final long EXPECTED_REFERENCE_ONE = 1;
 +  private final long EXPECTED_REFERENCE_TWO = 2;
 +
 +  @Test
 +  public void testOpenClose() throws Exception {
 +    String caseName = getName();
 +    FileSystem fs = FileSystem.get(conf);
 +    Path testDir = FSUtils.getRootDir(conf);
 +    Path outputDir = new Path(new Path(testDir, TABLE),
 +        FAMILY);
 +    HFileContext meta = new 
HFileContextBuilder().withBlockSize(8*1024).build();
 +    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
 +        .withOutputDir(outputDir).withFileContext(meta).build();
 +    MobTestUtil.writeStoreFile(writer, caseName);
 +    CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), 
conf, cacheConf);
 +    Assert.assertEquals(EXPECTED_REFERENCE_ZERO, 
cachedMobFile.getReferenceCount());
 +    cachedMobFile.open();
 +    Assert.assertEquals(EXPECTED_REFERENCE_ONE, 
cachedMobFile.getReferenceCount());
 +    cachedMobFile.open();
 +    Assert.assertEquals(EXPECTED_REFERENCE_TWO, 
cachedMobFile.getReferenceCount());
 +    cachedMobFile.close();
 +    Assert.assertEquals(EXPECTED_REFERENCE_ONE, 
cachedMobFile.getReferenceCount());
 +    cachedMobFile.close();
 +    Assert.assertEquals(EXPECTED_REFERENCE_ZERO, 
cachedMobFile.getReferenceCount());
 +  }
 +
 +  @Test
 +  public void testCompare() throws Exception {
 +    String caseName = getName();
 +    FileSystem fs = FileSystem.get(conf);
 +    Path testDir = FSUtils.getRootDir(conf);
 +    Path outputDir1 = new Path(new Path(testDir, TABLE),
 +        FAMILY1);
 +    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 
1024).build();
 +    StoreFile.Writer writer1 = new StoreFile.WriterBuilder(conf, cacheConf, 
fs)
 +        .withOutputDir(outputDir1).withFileContext(meta).build();
 +    MobTestUtil.writeStoreFile(writer1, caseName);
 +    CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, 
writer1.getPath(), conf, cacheConf);
 +    Path outputDir2 = new Path(new Path(testDir, TABLE),
 +        FAMILY2);
 +    StoreFile.Writer writer2 = new StoreFile.WriterBuilder(conf, cacheConf, 
fs)
 +    .withOutputDir(outputDir2)
 +    .withFileContext(meta)
 +    .build();
 +    MobTestUtil.writeStoreFile(writer2, caseName);
 +    CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, 
writer2.getPath(), conf, cacheConf);
 +    cachedMobFile1.access(1);
 +    cachedMobFile2.access(2);
 +    Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile2), 1);
 +    Assert.assertEquals(cachedMobFile2.compareTo(cachedMobFile1), -1);
 +    Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile1), 0);
 +  }
 +
 +  @Test
 +  public void testReadKeyValue() throws Exception {
 +    FileSystem fs = FileSystem.get(conf);
 +    Path testDir = FSUtils.getRootDir(conf);
 +    Path outputDir = new Path(new Path(testDir, TABLE), "familyname");
 +    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 
1024).build();
 +    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
 +        .withOutputDir(outputDir).withFileContext(meta).build();
 +    String caseName = getName();
 +    MobTestUtil.writeStoreFile(writer, caseName);
 +    CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), 
conf, cacheConf);
 +    byte[] family = Bytes.toBytes(caseName);
 +    byte[] qualify = Bytes.toBytes(caseName);
 +    // Test the start key
 +    byte[] startKey = Bytes.toBytes("aa");  // The start key bytes
 +    KeyValue expectedKey =
 +        new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, 
startKey);
 +    KeyValue seekKey = expectedKey.createKeyOnly(false);
 +    KeyValue kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, 
false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the end key
 +    byte[] endKey = Bytes.toBytes("zz");  // The end key bytes
 +    expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, 
Type.Put, endKey);
 +    seekKey = expectedKey.createKeyOnly(false);
 +    kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the random key
 +    byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
 +    expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, 
Type.Put, randomKey);
 +    seekKey = expectedKey.createKeyOnly(false);
 +    kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the key which is less than the start key
 +    byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
 +    expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, 
Type.Put, startKey);
 +    seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, 
Type.Put, lowerKey);
 +    kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the key which is more than the end key
 +    byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
 +    seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, 
Type.Put, upperKey);
 +    kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
 +    Assert.assertNull(kv);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
index 3432139,0000000..5e3a695
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
@@@ -1,193 -1,0 +1,193 @@@
 +/**
 +*
 +* Licensed to the Apache Software Foundation (ASF) under one
 +* or more contributor license agreements.  See the NOTICE file
 +* distributed with this work for additional information
 +* regarding copyright ownership.  The ASF licenses this file
 +* to you under the Apache License, Version 2.0 (the
 +* "License"); you may not use this file except in compliance
 +* with the License.  You may obtain a copy of the License at
 +*
 +*     http://www.apache.org/licenses/LICENSE-2.0
 +*
 +* Unless required by applicable law or agreed to in writing, software
 +* distributed under the License is distributed on an "AS IS" BASIS,
 +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 +* See the License for the specific language governing permissions and
 +* limitations under the License.
 +*/
 +package org.apache.hadoop.hbase.mob;
 +
 +import java.io.IOException;
 +import java.util.List;
 +
 +import org.apache.hadoop.hbase.Cell;
 +import org.apache.hadoop.hbase.CellUtil;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HColumnDescriptor;
 +import org.apache.hadoop.hbase.HTableDescriptor;
- import org.apache.hadoop.hbase.LargeTests;
 +import org.apache.hadoop.hbase.MasterNotRunningException;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 +import org.apache.hadoop.hbase.client.HBaseAdmin;
 +import org.apache.hadoop.hbase.client.HTable;
 +import org.apache.hadoop.hbase.client.Put;
 +import org.apache.hadoop.hbase.client.Result;
 +import org.apache.hadoop.hbase.client.ResultScanner;
 +import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.testclassification.LargeTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(LargeTests.class)
 +public class TestDefaultMobStoreFlusher {
 +
 + private final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
 + private final static byte [] row1 = Bytes.toBytes("row1");
 + private final static byte [] row2 = Bytes.toBytes("row2");
 + private final static byte [] family = Bytes.toBytes("family");
 + private final static byte [] qf1 = Bytes.toBytes("qf1");
 + private final static byte [] qf2 = Bytes.toBytes("qf2");
 + private final static byte [] value1 = Bytes.toBytes("value1");
 + private final static byte [] value2 = Bytes.toBytes("value2");
 +
 + @BeforeClass
 + public static void setUpBeforeClass() throws Exception {
 +   TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
 +   
TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", 
true);
 +
 +   TEST_UTIL.startMiniCluster(1);
 + }
 +
 + @AfterClass
 + public static void tearDownAfterClass() throws Exception {
 +   TEST_UTIL.shutdownMiniCluster();
 + }
 +
 + @Test
 + public void testFlushNonMobFile() throws InterruptedException {
 +   String TN = "testFlushNonMobFile";
 +   HTable table = null;
 +   HBaseAdmin admin = null;
 +
 +   try {
 +     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN));
 +     HColumnDescriptor hcd = new HColumnDescriptor(family);
 +     hcd.setMaxVersions(4);
 +     desc.addFamily(hcd);
 +
 +     admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
 +     admin.createTable(desc);
 +     table = new HTable(TEST_UTIL.getConfiguration(), TN);
 +
 +     //Put data
 +     Put put0 = new Put(row1);
 +     put0.add(family, qf1, 1, value1);
 +     table.put(put0);
 +
 +     //Put more data
 +     Put put1 = new Put(row2);
 +     put1.add(family, qf2, 1, value2);
 +     table.put(put1);
 +
 +     //Flush
 +     table.flushCommits();
 +     admin.flush(TN);
 +
 +     Scan scan = new Scan();
 +     scan.addColumn(family, qf1);
 +     scan.setMaxVersions(4);
 +     ResultScanner scanner = table.getScanner(scan);
 +
 +     //Compare
 +     Result result = scanner.next();
 +     int size = 0;
 +     while (result != null) {
 +       size++;
 +       List<Cell> cells = result.getColumnCells(family, qf1);
 +       // Verify the cell size
 +       Assert.assertEquals(1, cells.size());
 +       // Verify the value
 +       Assert.assertEquals(Bytes.toString(value1),
 +           Bytes.toString(CellUtil.cloneValue(cells.get(0))));
 +       result = scanner.next();
 +     }
 +     scanner.close();
 +     Assert.assertEquals(1, size);
 +     admin.close();
 +   } catch (MasterNotRunningException e1) {
 +     e1.printStackTrace();
 +   } catch (ZooKeeperConnectionException e2) {
 +     e2.printStackTrace();
 +   } catch (IOException e3) {
 +     e3.printStackTrace();
 +   }
 + }
 +
 + @Test
 + public void testFlushMobFile() throws InterruptedException {
 +   String TN = "testFlushMobFile";
 +   HTable table = null;
 +   HBaseAdmin admin = null;
 +
 +   try {
 +     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN));
 +     HColumnDescriptor hcd = new HColumnDescriptor(family);
 +     hcd.setMobEnabled(true);
 +     hcd.setMobThreshold(3L);
 +     hcd.setMaxVersions(4);
 +     desc.addFamily(hcd);
 +
 +     admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
 +     admin.createTable(desc);
 +     table = new HTable(TEST_UTIL.getConfiguration(), TN);
 +
 +     //put data
 +     Put put0 = new Put(row1);
 +     put0.add(family, qf1, 1, value1);
 +     table.put(put0);
 +
 +     //put more data
 +     Put put1 = new Put(row2);
 +     put1.add(family, qf2, 1, value2);
 +     table.put(put1);
 +
 +     //flush
 +     table.flushCommits();
 +     admin.flush(TN);
 +
 +     //Scan
 +     Scan scan = new Scan();
 +     scan.addColumn(family, qf1);
 +     scan.setMaxVersions(4);
 +     ResultScanner scanner = table.getScanner(scan);
 +
 +     //Compare
 +     Result result = scanner.next();
 +     int size = 0;
 +     while (result != null) {
 +       size++;
 +       List<Cell> cells = result.getColumnCells(family, qf1);
 +       // Verify the cell size
 +       Assert.assertEquals(1, cells.size());
 +       // Verify the value
 +       Assert.assertEquals(Bytes.toString(value1),
 +           Bytes.toString(CellUtil.cloneValue(cells.get(0))));
 +       result = scanner.next();
 +     }
 +     scanner.close();
 +     Assert.assertEquals(1, size);
 +     admin.close();
 +   } catch (MasterNotRunningException e1) {
 +     e1.printStackTrace();
 +   } catch (ZooKeeperConnectionException e2) {
 +     e2.printStackTrace();
 +   } catch (IOException e3) {
 +     e3.printStackTrace();
 +   }
 + }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
index ea50075,0000000..f16fa20
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
@@@ -1,179 -1,0 +1,179 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.mob;
 +
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.Random;
 +
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HColumnDescriptor;
 +import org.apache.hadoop.hbase.HTableDescriptor;
- import org.apache.hadoop.hbase.MediumTests;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.client.Admin;
 +import org.apache.hadoop.hbase.client.HTable;
 +import org.apache.hadoop.hbase.client.Put;
 +import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
 +import org.apache.hadoop.hbase.mob.MobUtils;
++import org.apache.hadoop.hbase.testclassification.MediumTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.junit.After;
 +import org.junit.AfterClass;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(MediumTests.class)
 +public class TestExpiredMobFileCleaner {
 +
 +  private final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
 +  private final static TableName tableName = 
TableName.valueOf("TestExpiredMobFileCleaner");
 +  private final static String family = "family";
 +  private final static byte[] row1 = Bytes.toBytes("row1");
 +  private final static byte[] row2 = Bytes.toBytes("row2");
 +  private final static byte[] qf = Bytes.toBytes("qf");
 +
 +  private static HTable table;
 +  private static Admin admin;
 +
 +  @BeforeClass
 +  public static void setUpBeforeClass() throws Exception {
 +    TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
 +    
TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", 
true);
 +
 +    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
 +  }
 +
 +  @AfterClass
 +  public static void tearDownAfterClass() throws Exception {
 +
 +  }
 +
 +  @Before
 +  public void setUp() throws Exception {
 +    TEST_UTIL.startMiniCluster(1);
 +  }
 +
 +  @After
 +  public void tearDown() throws Exception {
 +    admin.disableTable(tableName);
 +    admin.deleteTable(tableName);
 +    admin.close();
 +    TEST_UTIL.shutdownMiniCluster();
 +    TEST_UTIL.getTestFileSystem().delete(TEST_UTIL.getDataTestDir(), true);
 +  }
 +
 +  private void init() throws Exception {
 +    HTableDescriptor desc = new HTableDescriptor(tableName);
 +    HColumnDescriptor hcd = new HColumnDescriptor(family);
 +    hcd.setMobEnabled(true);
 +    hcd.setMobThreshold(3L);
 +    hcd.setMaxVersions(4);
 +    desc.addFamily(hcd);
 +
 +    admin = TEST_UTIL.getHBaseAdmin();
 +    admin.createTable(desc);
 +    table = new HTable(TEST_UTIL.getConfiguration(), tableName);
 +    table.setAutoFlush(false, false);
 +  }
 +
 +  private void modifyColumnExpiryDays(int expireDays) throws Exception {
 +    HColumnDescriptor hcd = new HColumnDescriptor(family);
 +    hcd.setMobEnabled(true);
 +    hcd.setMobThreshold(3L);
 +    // change ttl as expire days to make some row expired
 +    int timeToLive = expireDays * secondsOfDay();
 +    hcd.setTimeToLive(timeToLive);
 +
 +    admin.modifyColumn(tableName, hcd);
 +  }
 +
 +  private void putKVAndFlush(HTable table, byte[] row, byte[] value, long ts)
 +      throws Exception {
 +
 +    Put put = new Put(row, ts);
 +    put.add(Bytes.toBytes(family), qf, value);
 +    table.put(put);
 +
 +    table.flushCommits();
 +    admin.flush(tableName);
 +  }
 +
 +  /**
 +   * Creates a 3 day old hfile and a 1 day old hfile then sets expiry to 2 
days.
 +   * Verifies that the 3 day old hfile is removed but the 1 day one is still 
present
 +   * after the expiry based cleaner is run.
 +   */
 +  @Test
 +  public void testCleaner() throws Exception {
 +    init();
 +
 +    Path mobDirPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), 
tableName, family);
 +
 +    byte[] dummyData = makeDummyData(600);
 +    long ts = System.currentTimeMillis() - 3 * secondsOfDay() * 1000; // 3 
days before
 +    putKVAndFlush(table, row1, dummyData, ts);
 +    FileStatus[] firstFiles = 
TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
 +    //the first mob file
 +    assertEquals("Before cleanup without delay 1", 1, firstFiles.length);
 +    String firstFile = firstFiles[0].getPath().getName();
 +
 +    ts = System.currentTimeMillis() - 1 * secondsOfDay() * 1000; // 1 day 
before
 +    putKVAndFlush(table, row2, dummyData, ts);
 +    FileStatus[] secondFiles = 
TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
 +    //now there are 2 mob files
 +    assertEquals("Before cleanup without delay 2", 2, secondFiles.length);
 +    String f1 = secondFiles[0].getPath().getName();
 +    String f2 = secondFiles[1].getPath().getName();
 +    String secondFile = f1.equals(firstFile) ? f2 : f1;
 +
 +    modifyColumnExpiryDays(2); // ttl = 2, make the first row expired
 +
 +    //run the cleaner
 +    String[] args = new String[2];
 +    args[0] = tableName.getNameAsString();
 +    args[1] = family;
 +    ToolRunner.run(TEST_UTIL.getConfiguration(), new ExpiredMobFileCleaner(), 
args);
 +
 +    FileStatus[] filesAfterClean = 
TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
 +    String lastFile = filesAfterClean[0].getPath().getName();
 +    //the first mob file is removed
 +    assertEquals("After cleanup without delay 1", 1, filesAfterClean.length);
 +    assertEquals("After cleanup without delay 2", secondFile, lastFile);
 +  }
 +
 +  private Path getMobFamilyPath(Configuration conf, TableName tableName, 
String familyName) {
 +    Path p = new Path(MobUtils.getMobRegionPath(conf, tableName), familyName);
 +    return p;
 +  }
 +
 +  private int secondsOfDay() {
 +    return 24 * 3600;
 +  }
 +
 +  private byte[] makeDummyData(int size) {
 +    byte [] dummyData = new byte[size];
 +    new Random().nextBytes(dummyData);
 +    return dummyData;
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
index 163dd25,0000000..055eac3
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
@@@ -1,141 -1,0 +1,141 @@@
 +/**
 +*
 +* Licensed to the Apache Software Foundation (ASF) under one
 +* or more contributor license agreements.  See the NOTICE file
 +* distributed with this work for additional information
 +* regarding copyright ownership.  The ASF licenses this file
 +* to you under the Apache License, Version 2.0 (the
 +* "License"); you may not use this file except in compliance
 +* with the License.  You may obtain a copy of the License at
 +*
 +*     http://www.apache.org/licenses/LICENSE-2.0
 +*
 +* Unless required by applicable law or agreed to in writing, software
 +* distributed under the License is distributed on an "AS IS" BASIS,
 +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 +* See the License for the specific language governing permissions and
 +* limitations under the License.
 +*/
 +package org.apache.hadoop.hbase.mob;
 +
 +import java.util.List;
 +import java.util.Random;
 +
 +import org.apache.hadoop.hbase.Cell;
 +import org.apache.hadoop.hbase.CellUtil;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HColumnDescriptor;
 +import org.apache.hadoop.hbase.HTableDescriptor;
- import org.apache.hadoop.hbase.MediumTests;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.client.HBaseAdmin;
 +import org.apache.hadoop.hbase.client.HTable;
 +import org.apache.hadoop.hbase.client.Put;
 +import org.apache.hadoop.hbase.client.Result;
 +import org.apache.hadoop.hbase.client.ResultScanner;
 +import org.apache.hadoop.hbase.client.Scan;
 +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
++import org.apache.hadoop.hbase.testclassification.MediumTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(MediumTests.class)
 +public class TestMobDataBlockEncoding {
 +
 +  private final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
 +  private final static byte [] row1 = Bytes.toBytes("row1");
 +  private final static byte [] family = Bytes.toBytes("family");
 +  private final static byte [] qf1 = Bytes.toBytes("qualifier1");
 +  private final static byte [] qf2 = Bytes.toBytes("qualifier2");
 +  protected final byte[] qf3 = Bytes.toBytes("qualifier3");
 +  private static HTable table;
 +  private static HBaseAdmin admin;
 +  private static HColumnDescriptor hcd;
 +  private static HTableDescriptor desc;
 +  private static Random random = new Random();
 +  private static long defaultThreshold = 10;
 +
 +  @BeforeClass
 +  public static void setUpBeforeClass() throws Exception {
 +    TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
 +    
TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", 
true);
 +
 +    TEST_UTIL.startMiniCluster(1);
 +  }
 +
 +  @AfterClass
 +  public static void tearDownAfterClass() throws Exception {
 +    TEST_UTIL.shutdownMiniCluster();
 +  }
 +
 +  public void setUp(long threshold, String TN, DataBlockEncoding encoding)
 +      throws Exception {
 +    desc = new HTableDescriptor(TableName.valueOf(TN));
 +    hcd = new HColumnDescriptor(family);
 +    hcd.setMobEnabled(true);
 +    hcd.setMobThreshold(threshold);
 +    hcd.setMaxVersions(4);
 +    hcd.setDataBlockEncoding(encoding);
 +    desc.addFamily(hcd);
 +    admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
 +    admin.createTable(desc);
 +    table = new HTable(TEST_UTIL.getConfiguration(), TN);
 +  }
 +
 +  /**
 +   * Generate the mob value.
 +   *
 +   * @param size the size of the value
 +   * @return the mob value generated
 +   */
 +  private static byte[] generateMobValue(int size) {
 +    byte[] mobVal = new byte[size];
 +    random.nextBytes(mobVal);
 +    return mobVal;
 +  }
 +
 +  @Test
 +  public void testDataBlockEncoding() throws Exception {
 +    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
 +      testDataBlockEncoding(encoding);
 +    }
 +  }
 +
 +  public void testDataBlockEncoding(DataBlockEncoding encoding) throws 
Exception {
 +    String TN = "testDataBlockEncoding" + encoding;
 +    setUp(defaultThreshold, TN, encoding);
 +    long ts1 = System.currentTimeMillis();
 +    long ts2 = ts1 + 1;
 +    long ts3 = ts1 + 2;
 +    byte[] value = generateMobValue((int) defaultThreshold + 1);
 +
 +    Put put1 = new Put(row1);
 +    put1.add(family, qf1, ts3, value);
 +    put1.add(family, qf2, ts2, value);
 +    put1.add(family, qf3, ts1, value);
 +    table.put(put1);
 +
 +    table.flushCommits();
 +    admin.flush(TN);
 +
 +    Scan scan = new Scan();
 +    scan.setMaxVersions(4);
 +
 +    ResultScanner results = table.getScanner(scan);
 +    int count = 0;
 +    for (Result res : results) {
 +      List<Cell> cells = res.listCells();
 +      for(Cell cell : cells) {
 +        // Verify the value
 +        Assert.assertEquals(Bytes.toString(value),
 +            Bytes.toString(CellUtil.cloneValue(cell)));
 +        count++;
 +      }
 +    }
 +    results.close();
 +    Assert.assertEquals(3, count);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
index f6511f7,0000000..01050ae
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
@@@ -1,124 -1,0 +1,124 @@@
 +/**
 + *
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.mob;
 +
 +import junit.framework.TestCase;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.KeyValue;
 +import org.apache.hadoop.hbase.KeyValue.Type;
 +import org.apache.hadoop.hbase.KeyValueUtil;
- import org.apache.hadoop.hbase.SmallTests;
 +import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 +import org.apache.hadoop.hbase.io.hfile.HFileContext;
 +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 +import org.apache.hadoop.hbase.regionserver.BloomType;
 +import org.apache.hadoop.hbase.regionserver.StoreFile;
 +import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
++import org.apache.hadoop.hbase.testclassification.SmallTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.apache.hadoop.hbase.util.FSUtils;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(SmallTests.class)
 +public class TestMobFile extends TestCase {
 +  static final Log LOG = LogFactory.getLog(TestMobFile.class);
 +  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
 +  private Configuration conf = TEST_UTIL.getConfiguration();
 +  private CacheConfig cacheConf =  new CacheConfig(conf);
 +  private final String TABLE = "tableName";
 +  private final String FAMILY = "familyName";
 +
 +  @Test
 +  public void testReadKeyValue() throws Exception {
 +    FileSystem fs = FileSystem.get(conf);
 +      Path testDir = FSUtils.getRootDir(conf);
 +    Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
 +    HFileContext meta = new 
HFileContextBuilder().withBlockSize(8*1024).build();
 +    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
 +            .withOutputDir(outputDir)
 +            .withFileContext(meta)
 +            .build();
 +    String caseName = getName();
 +    MobTestUtil.writeStoreFile(writer, caseName);
 +
 +    MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
 +        conf, cacheConf, BloomType.NONE));
 +    byte[] family = Bytes.toBytes(caseName);
 +    byte[] qualify = Bytes.toBytes(caseName);
 +
 +    // Test the start key
 +    byte[] startKey = Bytes.toBytes("aa");  // The start key bytes
 +    KeyValue expectedKey =
 +        new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, 
startKey);
 +    KeyValue seekKey = expectedKey.createKeyOnly(false);
 +    KeyValue kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, 
false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the end key
 +    byte[] endKey = Bytes.toBytes("zz");  // The end key bytes
 +    expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, 
Type.Put, endKey);
 +    seekKey = expectedKey.createKeyOnly(false);
 +    kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the random key
 +    byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
 +    expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, 
Type.Put, randomKey);
 +    seekKey = expectedKey.createKeyOnly(false);
 +    kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the key which is less than the start key
 +    byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
 +    expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, 
Type.Put, startKey);
 +    seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, 
Type.Put, lowerKey);
 +    kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
 +    MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
 +
 +    // Test the key which is more than the end key
 +    byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
 +    seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, 
Type.Put, upperKey);
 +    kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
 +    assertNull(kv);
 +  }
 +
 +  @Test
 +  public void testGetScanner() throws Exception {
 +    FileSystem fs = FileSystem.get(conf);
 +    Path testDir = FSUtils.getRootDir(conf);
 +    Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
 +    HFileContext meta = new 
HFileContextBuilder().withBlockSize(8*1024).build();
 +    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
 +            .withOutputDir(outputDir)
 +            .withFileContext(meta)
 +            .build();
 +    MobTestUtil.writeStoreFile(writer, getName());
 +
 +    MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
 +        conf, cacheConf, BloomType.NONE));
 +    assertNotNull(mobFile.getScanner());
 +    assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
index 154327c,0000000..1a809a1
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
@@@ -1,206 -1,0 +1,207 @@@
 +/**
 + *
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.mob;
 +
 +import java.io.IOException;
 +import java.util.Date;
 +
 +import junit.framework.TestCase;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hbase.HBaseConfiguration;
 +import org.apache.hadoop.hbase.HBaseTestingUtility;
 +import org.apache.hadoop.hbase.HColumnDescriptor;
 +import org.apache.hadoop.hbase.HRegionInfo;
 +import org.apache.hadoop.hbase.HTableDescriptor;
 +import org.apache.hadoop.hbase.KeyValue;
- import org.apache.hadoop.hbase.SmallTests;
 +import org.apache.hadoop.hbase.TableName;
 +import org.apache.hadoop.hbase.regionserver.HMobStore;
 +import org.apache.hadoop.hbase.regionserver.HRegion;
 +import org.apache.hadoop.hbase.regionserver.StoreFile;
++import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
++import org.apache.hadoop.hbase.testclassification.SmallTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(SmallTests.class)
 +public class TestMobFileCache extends TestCase {
 +  static final Log LOG = LogFactory.getLog(TestMobFileCache.class);
 +  private HBaseTestingUtility UTIL;
 +  private HRegion region;
 +  private Configuration conf;
 +  private MobCacheConfig mobCacheConf;
 +  private MobFileCache mobFileCache;
 +  private Date currentDate = new Date();
 +  private final String TEST_CACHE_SIZE = "2";
 +  private final int EXPECTED_CACHE_SIZE_ZERO = 0;
 +  private final int EXPECTED_CACHE_SIZE_ONE = 1;
 +  private final int EXPECTED_CACHE_SIZE_TWO = 2;
 +  private final int EXPECTED_CACHE_SIZE_THREE = 3;
 +  private final long EXPECTED_REFERENCE_ONE = 1;
 +  private final long EXPECTED_REFERENCE_TWO = 2;
 +
 +  private final String TABLE = "tableName";
 +  private final String FAMILY1 = "family1";
 +  private final String FAMILY2 = "family2";
 +  private final String FAMILY3 = "family3";
 +
 +  private final byte[] ROW = Bytes.toBytes("row");
 +  private final byte[] ROW2 = Bytes.toBytes("row2");
 +  private final byte[] VALUE = Bytes.toBytes("value");
 +  private final byte[] VALUE2 = Bytes.toBytes("value2");
 +  private final byte[] QF1 = Bytes.toBytes("qf1");
 +  private final byte[] QF2 = Bytes.toBytes("qf2");
 +  private final byte[] QF3 = Bytes.toBytes("qf3");
 +
 +  @Override
 +  public void setUp() throws Exception {
 +    UTIL = HBaseTestingUtility.createLocalHTU();
 +    conf = UTIL.getConfiguration();
 +    HTableDescriptor htd = UTIL.createTableDescriptor("testMobFileCache");
 +    HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILY1);
 +    hcd1.setMobEnabled(true);
 +    hcd1.setMobThreshold(0);
 +    HColumnDescriptor hcd2 = new HColumnDescriptor(FAMILY2);
 +    hcd2.setMobEnabled(true);
 +    hcd2.setMobThreshold(0);
 +    HColumnDescriptor hcd3 = new HColumnDescriptor(FAMILY3);
 +    hcd3.setMobEnabled(true);
 +    hcd3.setMobThreshold(0);
 +    htd.addFamily(hcd1);
 +    htd.addFamily(hcd2);
 +    htd.addFamily(hcd3);
 +    region = UTIL.createLocalHRegion(htd, null, null);
 +  }
 +
 +  @Override
 +  protected void tearDown() throws Exception {
 +    region.close();
 +    region.getFilesystem().delete(UTIL.getDataTestDir(), true);
 +  }
 +
 +  /**
 +   * Create the mob store file.
 +   * @param family
 +   */
 +  private Path createMobStoreFile(String family) throws IOException {
 +    return createMobStoreFile(HBaseConfiguration.create(), family);
 +  }
 +
 +  /**
 +   * Create the mob store file
 +   * @param conf
 +   * @param family
 +   */
 +  private Path createMobStoreFile(Configuration conf, String family) throws 
IOException {
 +    HColumnDescriptor hcd = new HColumnDescriptor(family);
 +    hcd.setMaxVersions(4);
 +    hcd.setMobEnabled(true);
 +    mobCacheConf = new MobCacheConfig(conf, hcd);
 +    return createMobStoreFile(conf, hcd);
 +  }
 +
 +  /**
 +   * Create the mob store file
 +   * @param conf
 +   * @param hcd
 +   */
 +  private Path createMobStoreFile(Configuration conf, HColumnDescriptor hcd)
 +      throws IOException {
 +    // Setting up a Store
 +    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
 +    htd.addFamily(hcd);
 +    HMobStore mobStore = (HMobStore) region.getStore(hcd.getName());
 +    KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE);
 +    KeyValue key2 = new KeyValue(ROW, hcd.getName(), QF2, 1, VALUE);
 +    KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2);
 +    KeyValue[] keys = new KeyValue[] { key1, key2, key3 };
 +    int maxKeyCount = keys.length;
 +    HRegionInfo regionInfo = new HRegionInfo();
 +    StoreFile.Writer mobWriter = mobStore.createWriterInTmp(currentDate,
 +        maxKeyCount, hcd.getCompactionCompression(), 
regionInfo.getStartKey());
 +    Path mobFilePath = mobWriter.getPath();
 +    String fileName = mobFilePath.getName();
 +    mobWriter.append(key1);
 +    mobWriter.append(key2);
 +    mobWriter.append(key3);
 +    mobWriter.close();
 +    String targetPathName = MobUtils.formatDate(currentDate);
 +    Path targetPath = new Path(mobStore.getPath(), targetPathName);
 +    mobStore.commitFile(mobFilePath, targetPath);
 +    return new Path(targetPath, fileName);
 +  }
 +
 +  @Test
 +  public void testMobFileCache() throws Exception {
 +    FileSystem fs = FileSystem.get(conf);
 +    conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, TEST_CACHE_SIZE);
 +    mobFileCache = new MobFileCache(conf);
 +    Path file1Path = createMobStoreFile(FAMILY1);
 +    Path file2Path = createMobStoreFile(FAMILY2);
 +    Path file3Path = createMobStoreFile(FAMILY3);
 +
 +    // Before opening any file through the MobFileCache
 +    assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize());
 +    // Open one file by the MobFileCache
 +    CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(
 +        fs, file1Path, mobCacheConf);
 +    assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
 +    assertNotNull(cachedMobFile1);
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
 +
 +    // The eviction is also managed by a scheduled thread pool,
 +    // and its check period is set to 3600 seconds by default.
 +    // This evict call should acquire the lock most of the time.
 +    mobFileCache.evict();  // Cache not full, evict it
 +    assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
 +
 +    mobFileCache.evictFile(file1Path.getName());  // Evict one file
 +    assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize());
 +    assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile1.getReferenceCount());
 +
 +    cachedMobFile1.close();  // Close the cached mob file
 +
 +    // Reopen the three cached files
 +    cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(
 +        fs, file1Path, mobCacheConf);
 +    assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
 +    CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile(
 +        fs, file2Path, mobCacheConf);
 +    assertEquals(EXPECTED_CACHE_SIZE_TWO, mobFileCache.getCacheSize());
 +    CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile(
 +        fs, file3Path, mobCacheConf);
 +    // Before the eviction.
 +    // Evicting the cache should close the first file (file 1).
 +    assertEquals(EXPECTED_CACHE_SIZE_THREE, mobFileCache.getCacheSize());
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile2.getReferenceCount());
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile3.getReferenceCount());
 +    mobFileCache.evict();
 +    assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
 +    assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile1.getReferenceCount());
 +    assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile2.getReferenceCount());
 +    assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile3.getReferenceCount());
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe335b68/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java
----------------------------------------------------------------------
diff --cc 
hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java
index 9a6cf7f,0000000..fd78e59
mode 100644,000000..100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java
@@@ -1,79 -1,0 +1,79 @@@
 +/**
 + *
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hbase.mob;
 +
 +import java.util.Date;
 +import java.util.Random;
 +import java.util.UUID;
 +
 +import junit.framework.TestCase;
 +
- import org.apache.hadoop.hbase.SmallTests;
++import org.apache.hadoop.hbase.testclassification.SmallTests;
 +import org.apache.hadoop.hbase.util.Bytes;
 +import org.apache.hadoop.hbase.util.MD5Hash;
 +import org.junit.Test;
 +import org.junit.experimental.categories.Category;
 +
 +@Category(SmallTests.class)
 +public class TestMobFileName extends TestCase {
 +
 +  private String uuid;
 +  private Date date;
 +  private String dateStr;
 +  private byte[] startKey;
 +
 +  public void setUp() {
 +    Random random = new Random();
 +    uuid = UUID.randomUUID().toString().replaceAll("-", "");
 +    date = new Date();
 +    dateStr = MobUtils.formatDate(date);
 +    startKey = Bytes.toBytes(random.nextInt());
 +  }
 +
 +  @Test
 +  public void testHashCode() {
 +    assertEquals(MobFileName.create(startKey, dateStr, uuid).hashCode(),
 +        MobFileName.create(startKey, dateStr, uuid).hashCode());
 +    assertNotSame(MobFileName.create(startKey, dateStr, uuid).hashCode(),
 +        MobFileName.create(startKey, dateStr, uuid).hashCode());
 +  }
 +
 +  @Test
 +  public void testCreate() {
 +    MobFileName mobFileName = MobFileName.create(startKey, dateStr, uuid);
 +    assertEquals(mobFileName, MobFileName.create(mobFileName.getFileName()));
 +  }
 +
 +  @Test
 +  public void testGet() {
 +    MobFileName mobFileName = MobFileName.create(startKey, dateStr, uuid);
 +    assertEquals(MD5Hash.getMD5AsHex(startKey, 0, startKey.length), 
mobFileName.getStartKey());
 +    assertEquals(dateStr, mobFileName.getDate());
 +    assertEquals(mobFileName.getFileName(), MD5Hash.getMD5AsHex(startKey, 0, 
startKey.length)
 +        + dateStr + uuid);
 +  }
 +
 +  @Test
 +  public void testEquals() {
 +    MobFileName mobFileName = MobFileName.create(startKey, dateStr, uuid);
 +    assertTrue(mobFileName.equals(mobFileName));
 +    assertFalse(mobFileName.equals(this));
 +    assertTrue(mobFileName.equals(MobFileName.create(startKey, dateStr, 
uuid)));
 +  }
 +}

Reply via email to