http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
index 0000000,820087a..8250b47
mode 000000,100644..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
@@@ -1,0 -1,511 +1,512 @@@
+ /**
+  *
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hbase.regionserver;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Random;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.Cell;
+ import org.apache.hadoop.hbase.CellUtil;
+ import org.apache.hadoop.hbase.HBaseTestingUtility;
+ import org.apache.hadoop.hbase.HColumnDescriptor;
+ import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.HTableDescriptor;
+ import org.apache.hadoop.hbase.TableName;
+ import org.apache.hadoop.hbase.client.ConnectionFactory;
+ import org.apache.hadoop.hbase.client.Get;
+ import org.apache.hadoop.hbase.client.HBaseAdmin;
+ import org.apache.hadoop.hbase.client.Put;
+ import org.apache.hadoop.hbase.client.Result;
+ import org.apache.hadoop.hbase.client.ResultScanner;
+ import org.apache.hadoop.hbase.client.Scan;
+ import org.apache.hadoop.hbase.client.Table;
+ import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
+ import org.apache.hadoop.hbase.io.hfile.TestHFile;
+ import org.apache.hadoop.hbase.mob.MobConstants;
+ import org.apache.hadoop.hbase.mob.MobUtils;
+ import org.apache.hadoop.hbase.testclassification.MediumTests;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+ import org.junit.AfterClass;
+ import org.junit.Assert;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ @Category(MediumTests.class)
+ public class TestMobStoreScanner {
+ 
+   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+   private final static byte [] row1 = Bytes.toBytes("row1");
+   private final static byte [] row2 = Bytes.toBytes("row2");
+   private final static byte [] family = Bytes.toBytes("family");
+   private final static byte [] qf1 = Bytes.toBytes("qualifier1");
+   private final static byte [] qf2 = Bytes.toBytes("qualifier2");
+   protected final byte[] qf3 = Bytes.toBytes("qualifier3");
+   private static Table table;
+   private static HBaseAdmin admin;
+   private static HColumnDescriptor hcd;
+   private static HTableDescriptor desc;
+   private static Random random = new Random();
+   private static long defaultThreshold = 10;
+   private FileSystem fs;
+   private Configuration conf;
+ 
+   @BeforeClass
+   public static void setUpBeforeClass() throws Exception {
+     TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+     TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+     TEST_UTIL.getConfiguration().setInt("hbase.client.keyvalue.maxsize", 100*1024*1024);
+ 
+     TEST_UTIL.startMiniCluster(1);
+   }
+ 
+   @AfterClass
+   public static void tearDownAfterClass() throws Exception {
+     TEST_UTIL.shutdownMiniCluster();
+   }
+ 
+   public void setUp(long threshold, TableName tn) throws Exception {
+     conf = TEST_UTIL.getConfiguration();
+     fs = FileSystem.get(conf);
+     desc = new HTableDescriptor(tn);
+     hcd = new HColumnDescriptor(family);
+     hcd.setMobEnabled(true);
+     hcd.setMobThreshold(threshold);
+     hcd.setMaxVersions(4);
+     desc.addFamily(hcd);
+     admin = TEST_UTIL.getHBaseAdmin();
+     admin.createTable(desc);
+     table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
+             .getTable(tn);
+   }
+ 
+   /**
+    * Generate the mob value.
+    *
+    * @param size the size of the value
+    * @return the mob value generated
+    */
+   private static byte[] generateMobValue(int size) {
+     byte[] mobVal = new byte[size];
+     random.nextBytes(mobVal);
+     return mobVal;
+   }
+ 
+   /**
+    * Set the scan attributes.
+    *
+    * @param scan the scan to configure
+    * @param reversed if true, the scan will be in backward order
+    * @param mobScanRaw if true, the scan will return the raw mob reference
+    */
+   public void setScan(Scan scan, boolean reversed, boolean mobScanRaw) {
+     scan.setReversed(reversed);
+     scan.setMaxVersions(4);
+     if (mobScanRaw) {
+       scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+     }
+   }
+ 
+   @Test
+   public void testMobStoreScanner() throws Exception {
+     testGetFromFiles(false);
+     testGetFromMemStore(false);
+     testGetReferences(false);
+     testMobThreshold(false);
+     testGetFromArchive(false);
+   }
+ 
+   @Test
+   public void testReversedMobStoreScanner() throws Exception {
+     testGetFromFiles(true);
+     testGetFromMemStore(true);
+     testGetReferences(true);
+     testMobThreshold(true);
+     testGetFromArchive(true);
+   }
+ 
+   @Test(timeout=60000)
+   public void testGetMassive() throws Exception {
+     setUp(defaultThreshold, TableName.valueOf("testGetMassive"));
+ 
 -    // Put some data 5 10, 15, 20  mb ok  (this would be right below protobuf default max size of 64MB.
++    // Put some data 5 10, 15, 20  mb ok  (this would be right below protobuf
++    // default max size of 64MB.
+     // 25, 30, and 40 MB fail; these are above the protobuf max size of 64MB.
+     byte[] bigValue = new byte[25*1024*1024];
+ 
+     Put put = new Put(row1);
+     put.addColumn(family, qf1, bigValue);
+     put.addColumn(family, qf2, bigValue);
+     put.addColumn(family, qf3, bigValue);
+     table.put(put);
+ 
+     Get g = new Get(row1);
+     Result r = table.get(g);
+     // should not have blown up.
+   }
+ 
+   @Test
+   public void testReadPt() throws Exception {
+     TableName tn = TableName.valueOf("testReadPt");
+     setUp(0L, tn);
+     long ts = System.currentTimeMillis();
+     byte[] value1 = Bytes.toBytes("value1");
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts, value1);
+     table.put(put1);
+     Put put2 = new Put(row2);
+     byte[] value2 = Bytes.toBytes("value2");
+     put2.addColumn(family, qf1, ts, value2);
+     table.put(put2);
+ 
+     Scan scan = new Scan();
+     scan.setCaching(1);
+     ResultScanner rs = table.getScanner(scan);
+ 
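+     // The scanner above has already established its read point, so the
+     // overwrites below, even at the same timestamp, must stay invisible to it.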
+     Put put3 = new Put(row1);
+     byte[] value3 = Bytes.toBytes("value3");
+     put3.addColumn(family, qf1, ts, value3);
+     table.put(put3);
+     Put put4 = new Put(row2);
+     byte[] value4 = Bytes.toBytes("value4");
+     put4.addColumn(family, qf1, ts, value4);
+     table.put(put4);
+     Result result = rs.next();
+     Cell cell = result.getColumnLatestCell(family, qf1);
 -    Assert.assertEquals("value1", Bytes.toString(cell.getValue()));
++    Assert.assertEquals("value1", Bytes.toString(CellUtil.cloneValue(cell)));
+ 
+     admin.flush(tn);
+     result = rs.next();
+     cell = result.getColumnLatestCell(family, qf1);
 -    Assert.assertEquals("value2", Bytes.toString(cell.getValue()));
++    Assert.assertEquals("value2", Bytes.toString(CellUtil.cloneValue(cell)));
+   }
+ 
+   @Test
+   public void testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss() throws Exception {
+     TableName tn = TableName.valueOf("testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss");
+     setUp(0, tn);
+     createRecordAndCorruptMobFile(tn, row1, family, qf1, Bytes.toBytes("value1"));
+     Get get = new Get(row1);
+     get.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
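+     // EMPTY_VALUE_ON_MOBCELL_MISS makes reads of a missing or corrupt mob cell
+     // return an empty value instead of throwing a CorruptHFileException.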
+     Result result = table.get(get);
+     Cell cell = result.getColumnLatestCell(family, qf1);
+     Assert.assertEquals(0, CellUtil.cloneValue(cell).length);
+   }
+ 
+   @Test
+   public void testReadFromCorruptMobFiles() throws Exception {
+     TableName tn = TableName.valueOf("testReadFromCorruptMobFiles");
+     setUp(0, tn);
+     createRecordAndCorruptMobFile(tn, row1, family, qf1, Bytes.toBytes("value1"));
+     Get get = new Get(row1);
+     IOException ioe = null;
+     try {
+       table.get(get);
+     } catch (IOException e) {
+       ioe = e;
+     }
+     Assert.assertNotNull(ioe);
+     Assert.assertEquals(CorruptHFileException.class.getName(), ioe.getClass().getName());
+   }
+ 
+   private void createRecordAndCorruptMobFile(TableName tn, byte[] row, byte[] family, byte[] qf,
+     byte[] value) throws IOException {
+     Put put1 = new Put(row);
+     put1.addColumn(family, qf, value);
+     table.put(put1);
+     admin.flush(tn);
+     Path mobFile = getFlushedMobFile(conf, fs, tn, Bytes.toString(family));
+     Assert.assertNotNull(mobFile);
+     // create new corrupt mob file.
+     Path corruptFile = new Path(mobFile.getParent(), "dummy");
+     TestHFile.truncateFile(fs, mobFile, corruptFile);
+     fs.delete(mobFile, true);
+     fs.rename(corruptFile, mobFile);
+   }
+ 
+   private Path getFlushedMobFile(Configuration conf, FileSystem fs, TableName table, String family)
+     throws IOException {
+     Path regionDir = MobUtils.getMobRegionPath(conf, table);
+     Path famDir = new Path(regionDir, family);
+     FileStatus[] hfFss = fs.listStatus(famDir);
+     for (FileStatus hfs : hfFss) {
+       if (!hfs.isDirectory()) {
+         return hfs.getPath();
+       }
+     }
+     return null;
+   }
+ 
+   private void testGetFromFiles(boolean reversed) throws Exception {
+     TableName tn = TableName.valueOf("testGetFromFiles" + reversed);
+     setUp(defaultThreshold, tn);
+     long ts1 = System.currentTimeMillis();
+     long ts2 = ts1 + 1;
+     long ts3 = ts1 + 2;
+     byte [] value = generateMobValue((int)defaultThreshold+1);
+ 
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts3, value);
+     put1.addColumn(family, qf2, ts2, value);
+     put1.addColumn(family, qf3, ts1, value);
+     table.put(put1);
+ 
+     admin.flush(tn);
+ 
+     Scan scan = new Scan();
+     setScan(scan, reversed, false);
+ 
+     ResultScanner results = table.getScanner(scan);
+     int count = 0;
+     for (Result res : results) {
+       List<Cell> cells = res.listCells();
+       for(Cell cell : cells) {
+         // Verify the value
+         Assert.assertEquals(Bytes.toString(value),
+             Bytes.toString(CellUtil.cloneValue(cell)));
+         count++;
+       }
+     }
+     results.close();
+     Assert.assertEquals(3, count);
+   }
+ 
+   private void testGetFromMemStore(boolean reversed) throws Exception {
+     setUp(defaultThreshold, TableName.valueOf("testGetFromMemStore" + reversed));
+     long ts1 = System.currentTimeMillis();
+     long ts2 = ts1 + 1;
+     long ts3 = ts1 + 2;
+     byte [] value = generateMobValue((int)defaultThreshold+1);
+ 
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts3, value);
+     put1.addColumn(family, qf2, ts2, value);
+     put1.addColumn(family, qf3, ts1, value);
+     table.put(put1);
+ 
+     Scan scan = new Scan();
+     setScan(scan, reversed, false);
+ 
+     ResultScanner results = table.getScanner(scan);
+     int count = 0;
+     for (Result res : results) {
+       List<Cell> cells = res.listCells();
+       for(Cell cell : cells) {
+         // Verify the value
+         Assert.assertEquals(Bytes.toString(value),
+             Bytes.toString(CellUtil.cloneValue(cell)));
+         count++;
+       }
+     }
+     results.close();
+     Assert.assertEquals(3, count);
+   }
+ 
+   private void testGetReferences(boolean reversed) throws Exception {
+     TableName tn = TableName.valueOf("testGetReferences" + reversed);
+     setUp(defaultThreshold, tn);
+     long ts1 = System.currentTimeMillis();
+     long ts2 = ts1 + 1;
+     long ts3 = ts1 + 2;
+     byte [] value = generateMobValue((int)defaultThreshold+1);
+ 
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts3, value);
+     put1.addColumn(family, qf2, ts2, value);
+     put1.addColumn(family, qf3, ts1, value);
+     table.put(put1);
+ 
+     admin.flush(tn);
+ 
+     Scan scan = new Scan();
+     setScan(scan, reversed, true);
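+     // With MOB_SCAN_RAW set, the scan returns the mob reference cells
+     // themselves rather than resolving them to the actual values.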
+ 
+     ResultScanner results = table.getScanner(scan);
+     int count = 0;
+     for (Result res : results) {
+       List<Cell> cells = res.listCells();
+       for(Cell cell : cells) {
+         // Verify the value
+         assertIsMobReference(cell, row1, family, value, tn);
+         count++;
+       }
+     }
+     results.close();
+     Assert.assertEquals(3, count);
+   }
+ 
+   private void testMobThreshold(boolean reversed) throws Exception {
+     TableName tn = TableName.valueOf("testMobThreshold" + reversed);
+     setUp(defaultThreshold, tn);
+     byte [] valueLess = generateMobValue((int)defaultThreshold-1);
+     byte [] valueEqual = generateMobValue((int)defaultThreshold);
+     byte [] valueGreater = generateMobValue((int)defaultThreshold+1);
+     long ts1 = System.currentTimeMillis();
+     long ts2 = ts1 + 1;
+     long ts3 = ts1 + 2;
+ 
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts3, valueLess);
+     put1.addColumn(family, qf2, ts2, valueEqual);
+     put1.addColumn(family, qf3, ts1, valueGreater);
+     table.put(put1);
+ 
+     admin.flush(tn);
+ 
+     Scan scan = new Scan();
+     setScan(scan, reversed, true);
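+     // Only the cell whose value is strictly larger than the mob threshold
+     // should have been written as a mob reference.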
+ 
+     Cell cellLess = null;
+     Cell cellEqual = null;
+     Cell cellGreater = null;
+     ResultScanner results = table.getScanner(scan);
+     int count = 0;
+     for (Result res : results) {
+       List<Cell> cells = res.listCells();
+       for(Cell cell : cells) {
+         // Verify the value
+         String qf = Bytes.toString(CellUtil.cloneQualifier(cell));
+         if(qf.equals(Bytes.toString(qf1))) {
+           cellLess = cell;
+         }
+         if(qf.equals(Bytes.toString(qf2))) {
+           cellEqual = cell;
+         }
+         if(qf.equals(Bytes.toString(qf3))) {
+           cellGreater = cell;
+         }
+         count++;
+       }
+     }
+     Assert.assertEquals(3, count);
+     assertNotMobReference(cellLess, row1, family, valueLess);
+     assertNotMobReference(cellEqual, row1, family, valueEqual);
+     assertIsMobReference(cellGreater, row1, family, valueGreater, tn);
+     results.close();
+   }
+ 
+   private void testGetFromArchive(boolean reversed) throws Exception {
+     TableName tn = TableName.valueOf("testGetFromArchive" + reversed);
+     setUp(defaultThreshold, tn);
+     long ts1 = System.currentTimeMillis();
+     long ts2 = ts1 + 1;
+     long ts3 = ts1 + 2;
+     byte [] value = generateMobValue((int)defaultThreshold+1);
+     // Put some data
+     Put put1 = new Put(row1);
+     put1.addColumn(family, qf1, ts3, value);
+     put1.addColumn(family, qf2, ts2, value);
+     put1.addColumn(family, qf3, ts1, value);
+     table.put(put1);
+ 
+     admin.flush(tn);
+ 
+     // Get the files in the mob path
+     Path mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(), tn),
+       hcd.getNameAsString());
+     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+     FileStatus[] files = fs.listStatus(mobFamilyPath);
+ 
+     // Get the archive path
+     Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
+     Path tableDir = FSUtils.getTableDir(rootDir, tn);
+     HRegionInfo regionInfo = MobUtils.getMobRegionInfo(tn);
+     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(),
+         regionInfo, tableDir, family);
+ 
+     // Move the files from mob path to archive path
+     fs.mkdirs(storeArchiveDir);
+     int fileCount = 0;
+     for(FileStatus file : files) {
+       fileCount++;
+       Path filePath = file.getPath();
+       Path src = new Path(mobFamilyPath, filePath.getName());
+       Path dst = new Path(storeArchiveDir, filePath.getName());
+       fs.rename(src, dst);
+     }
+ 
+     // Verify the move succeeded
+     FileStatus[] files1 = fs.listStatus(mobFamilyPath);
+     Assert.assertEquals(0, files1.length);
+     FileStatus[] files2 = fs.listStatus(storeArchiveDir);
+     Assert.assertEquals(fileCount, files2.length);
+ 
+     // Scan from archive
+     Scan scan = new Scan();
+     setScan(scan, reversed, false);
+     ResultScanner results = table.getScanner(scan);
+     int count = 0;
+     for (Result res : results) {
+       List<Cell> cells = res.listCells();
+       for(Cell cell : cells) {
+         // Verify the value
+         Assert.assertEquals(Bytes.toString(value),
+             Bytes.toString(CellUtil.cloneValue(cell)));
+         count++;
+       }
+     }
+     results.close();
+     Assert.assertEquals(3, count);
+   }
+ 
+   /**
+    * Assert that the value is not stored in a mob file.
+    */
+   private static void assertNotMobReference(Cell cell, byte[] row, byte[] family,
+       byte[] value) throws IOException {
+     Assert.assertEquals(Bytes.toString(row),
+         Bytes.toString(CellUtil.cloneRow(cell)));
+     Assert.assertEquals(Bytes.toString(family),
+         Bytes.toString(CellUtil.cloneFamily(cell)));
+     Assert.assertTrue(Bytes.toString(value).equals(
+         Bytes.toString(CellUtil.cloneValue(cell))));
+   }
+ 
+   /**
+    * Assert that the value is stored in a mob file.
+    */
+   private static void assertIsMobReference(Cell cell, byte[] row, byte[] family,
+       byte[] value, TableName tn) throws IOException {
+     Assert.assertEquals(Bytes.toString(row),
+         Bytes.toString(CellUtil.cloneRow(cell)));
+     Assert.assertEquals(Bytes.toString(family),
+         Bytes.toString(CellUtil.cloneFamily(cell)));
+     Assert.assertFalse(Bytes.toString(value).equals(
+         Bytes.toString(CellUtil.cloneValue(cell))));
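+     // A mob reference cell stores the original value length as a 4-byte int,
+     // followed by the name of the mob file that holds the actual value.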
+     byte[] referenceValue = CellUtil.cloneValue(cell);
+     String fileName = Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
+         referenceValue.length - Bytes.SIZEOF_INT);
+     int valLen = Bytes.toInt(referenceValue, 0, Bytes.SIZEOF_INT);
+     Assert.assertEquals(value.length, valLen);
+     Path mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(), tn),
+         hcd.getNameAsString());
+     Path targetPath = new Path(mobFamilyPath, fileName);
+     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+     Assert.assertTrue(fs.exists(targetPath));
+   }
+ }

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
index 0000000,f7a9918..a8e88b1
mode 000000,100644..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
@@@ -1,0 -1,548 +1,547 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hbase.snapshot;
+ 
+ import static org.junit.Assert.*;
+ 
+ import java.io.IOException;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.concurrent.CountDownLatch;
+ 
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.commons.logging.impl.Log4JLogger;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.HBaseTestingUtility;
+ import org.apache.hadoop.hbase.HConstants;
+ import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.TableName;
+ import org.apache.hadoop.hbase.TableNotFoundException;
+ import org.apache.hadoop.hbase.client.*;
+ import org.apache.hadoop.hbase.ipc.RpcServer;
+ import org.apache.hadoop.hbase.master.HMaster;
+ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+ import org.apache.hadoop.hbase.mob.MobConstants;
+ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+ import org.apache.hadoop.hbase.testclassification.ClientTests;
+ import org.apache.hadoop.hbase.testclassification.LargeTests;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.apache.log4j.Level;
+ import org.junit.After;
+ import org.junit.AfterClass;
+ import org.junit.Before;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ /**
+  * Test creating/using/deleting snapshots from the client
+  * <p>
+  * This is an end-to-end test for the snapshot utility
+  *
+  * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring
+  * because there will be a few more flavors of snapshots that need to run these tests.
+  */
+ @Category({ClientTests.class, LargeTests.class})
+ public class TestMobFlushSnapshotFromClient {
+   private static final Log LOG = LogFactory.getLog(TestMobFlushSnapshotFromClient.class);
+   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+   private static final int NUM_RS = 2;
+   private static final String STRING_TABLE_NAME = "test";
+   private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+   private static final TableName TABLE_NAME =
+       TableName.valueOf(STRING_TABLE_NAME);
+   private final int DEFAULT_NUM_ROWS = 100;
+ 
+   /**
+    * Setup the config for the cluster
+    * @throws Exception on failure
+    */
+   @BeforeClass
+   public static void setupCluster() throws Exception {
+     ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
+     ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+     setupConf(UTIL.getConfiguration());
+     UTIL.startMiniCluster(NUM_RS);
+   }
+ 
+   private static void setupConf(Configuration conf) {
+     // disable the ui
+     conf.setInt("hbase.regionsever.info.port", -1);
+     // change the flush size to a small amount, regulating number of store files
+     conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+     // so make sure we get a compaction when doing a load, but keep around some
+     // files in the store
+     conf.setInt("hbase.hstore.compaction.min", 10);
+     conf.setInt("hbase.hstore.compactionThreshold", 10);
+     // block writes if we get to 12 store files
+     conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+     // Enable snapshot
+     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+       ConstantSizeRegionSplitPolicy.class.getName());
+     conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+   }
+ 
+   @Before
+   public void setup() throws Exception {
+     MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     UTIL.deleteTable(TABLE_NAME);
+ 
+     SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+     SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+   }
+ 
+   @AfterClass
+   public static void cleanupTest() throws Exception {
+     try {
+       UTIL.shutdownMiniCluster();
+     } catch (Exception e) {
+       LOG.warn("failure shutting down cluster", e);
+     }
+   }
+ 
+   /**
+    * Test simple flush snapshotting a table that is online
+    * @throws Exception
+    */
+   @Test (timeout=300000)
+   public void testFlushTableSnapshot() throws Exception {
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+ 
+     // put some stuff in the table
+     Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ 
+     LOG.debug("FS state before snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     // take a snapshot of the enabled table
+     String snapshotString = "offlineTableSnapshot";
+     byte[] snapshot = Bytes.toBytes(snapshotString);
+     admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+     LOG.debug("Snapshot completed.");
+ 
+     // make sure we have the snapshot
+     List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+       snapshot, TABLE_NAME);
+ 
+     // make sure it's a valid snapshot
+     FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+     LOG.debug("FS state after snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+         admin, fs);
+   }
+ 
+   /**
+    * Test snapshotting a table that is online without flushing
+    * @throws Exception
+    */
+   @Test(timeout=30000)
+   public void testSkipFlushTableSnapshot() throws Exception {
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+ 
+     // put some stuff in the table
+     Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+     UTIL.loadTable(table, TEST_FAM);
+ 
+     LOG.debug("FS state before snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     // take a snapshot of the enabled table
+     String snapshotString = "skipFlushTableSnapshot";
+     byte[] snapshot = Bytes.toBytes(snapshotString);
+     admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
+     LOG.debug("Snapshot completed.");
+ 
+     // make sure we have the snapshot
+     List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+         snapshot, TABLE_NAME);
+ 
+     // make sure it's a valid snapshot
+     FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+     LOG.debug("FS state after snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+         admin, fs);
+ 
+     admin.deleteSnapshot(snapshot);
+     snapshots = admin.listSnapshots();
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+   }
+ 
+   /**
+    * Test simple flush snapshotting a table that is online
+    * @throws Exception
+    */
+   @Test (timeout=300000)
+   public void testFlushTableSnapshotWithProcedure() throws Exception {
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+ 
+     // put some stuff in the table
+     Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ 
+     LOG.debug("FS state before snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     // take a snapshot of the enabled table
+     String snapshotString = "offlineTableSnapshot";
+     byte[] snapshot = Bytes.toBytes(snapshotString);
+     Map<String, String> props = new HashMap<String, String>();
+     props.put("table", TABLE_NAME.getNameAsString());
+     admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
+         snapshotString, props);
+ 
+     LOG.debug("Snapshot completed.");
+ 
+     // make sure we have the snapshot
+     List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+       snapshot, TABLE_NAME);
+ 
+     // make sure it's a valid snapshot
+     FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+     LOG.debug("FS state after snapshot:");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+         FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ 
+     SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+         admin, fs);
+   }
+ 
+   @Test (timeout=300000)
+   public void testSnapshotFailsOnNonExistantTable() throws Exception {
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+     TableName tableName = TableName.valueOf("_not_a_table");
+ 
+     // make sure the table doesn't exist
+     boolean fail = false;
+     do {
+       try {
+         admin.getTableDescriptor(tableName);
+         fail = true;
+         LOG.error("Table:" + tableName + " already exists, checking a new name");
+         tableName = TableName.valueOf(tableName + "!");
+       } catch (TableNotFoundException e) {
+         fail = false;
+       }
+     } while (fail);
+ 
+     // snapshot the non-existent table
+     try {
+       admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH);
+       fail("Snapshot succeeded even though there is no table.");
+     } catch (SnapshotCreationException e) {
+       LOG.info("Correctly failed to snapshot a non-existent table: " + e.getMessage());
+     }
+   }
+ 
+   @Test(timeout = 300000)
+   public void testAsyncFlushSnapshot() throws Exception {
+     Admin admin = UTIL.getHBaseAdmin();
+     SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
+         .setTable(TABLE_NAME.getNameAsString())
+         .setType(SnapshotDescription.Type.FLUSH)
+         .build();
+ 
+     // take the snapshot async
+     admin.takeSnapshotAsync(snapshot);
+ 
+     // constantly loop, looking for the snapshot to complete
+     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+     SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
+     LOG.info(" === Async Snapshot Completed ===");
+     FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+       FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+     // make sure we get the snapshot
+     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
+   }
+ 
+   @Test (timeout=300000)
+   public void testSnapshotStateAfterMerge() throws Exception {
+     int numRows = DEFAULT_NUM_ROWS;
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+     // load the table so we have some data
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+ 
+     // Take a snapshot
+     String snapshotBeforeMergeName = "snapshotBeforeMerge";
+     admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+ 
+     // Clone the table
+     TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
+     admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
+     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
+ 
+     // Merge two regions
+     List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+     Collections.sort(regions, new Comparator<HRegionInfo>() {
+       public int compare(HRegionInfo r1, HRegionInfo r2) {
+         return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+       }
+     });
+ 
+     int numRegions = admin.getTableRegions(TABLE_NAME).size();
+     int numRegionsAfterMerge = numRegions - 2;
+     admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+         regions.get(2).getEncodedNameAsBytes(), true);
+     admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+         regions.get(6).getEncodedNameAsBytes(), true);
+ 
+     // Verify that there are two fewer regions
+     waitRegionsAfterMerge(numRegionsAfterMerge);
+     assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+ 
+     // Clone the table
+     TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
+     admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
+     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
+ 
+     MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+     MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneBeforeMergeName, numRows);
+     MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneAfterMergeName, numRows);
+ 
+     // clean up the cloned tables
+     UTIL.deleteTable(cloneAfterMergeName);
+     UTIL.deleteTable(cloneBeforeMergeName);
+   }
+ 
+   @Test (timeout=300000)
+   public void testTakeSnapshotAfterMerge() throws Exception {
+     int numRows = DEFAULT_NUM_ROWS;
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+     // load the table so we have some data
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+ 
+     // Merge two regions
+     List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+     Collections.sort(regions, new Comparator<HRegionInfo>() {
+       public int compare(HRegionInfo r1, HRegionInfo r2) {
+         return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+       }
+     });
+ 
+     int numRegions = admin.getTableRegions(TABLE_NAME).size();
+     int numRegionsAfterMerge = numRegions - 2;
+     admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+         regions.get(2).getEncodedNameAsBytes(), true);
+     admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+         regions.get(6).getEncodedNameAsBytes(), true);
+ 
+     waitRegionsAfterMerge(numRegionsAfterMerge);
+     assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+ 
+     // Take a snapshot
+     String snapshotName = "snapshotAfterMerge";
+     SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
+       SnapshotDescription.Type.FLUSH, 3);
+ 
+     // Clone the table
+     TableName cloneName = TableName.valueOf("cloneMerge");
+     admin.cloneSnapshot(snapshotName, cloneName);
+     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
+ 
+     MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+     MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneName, numRows);
+ 
+     // clean up the cloned table
+     UTIL.deleteTable(cloneName);
+   }
+ 
+   /**
+    * Basic end-to-end test of simple-flush-based snapshots
+    */
+   @Test (timeout=300000)
+   public void testFlushCreateListDestroy() throws Exception {
+     LOG.debug("------- Starting Snapshot test -------------");
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+     // load the table so we have some data
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ 
+     String snapshotName = "flushSnapshotCreateListDestroy";
+     FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+     SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
+       snapshotName, rootDir, fs, true);
+   }
+ 
+   /**
+    * Demonstrate that we reject snapshot requests if there is a snapshot already running on the
+    * same table, and that concurrent snapshots on different tables can both succeed.
+   @Test(timeout=300000)
+   public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
+     final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2";
+     final TableName TABLE2_NAME =
+         TableName.valueOf(STRING_TABLE2_NAME);
+ 
+     int ssNum = 20;
+     Admin admin = UTIL.getHBaseAdmin();
+     // make sure we don't fail on listing snapshots
+     SnapshotTestingUtils.assertNoSnapshots(admin);
+     // create second testing table
+     SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
+     // load the table so we have some data
+     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+     SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ 
+     final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
+     // We'll have one of these per thread
+     class SSRunnable implements Runnable {
+       SnapshotDescription ss;
+       SSRunnable(SnapshotDescription ss) {
+         this.ss = ss;
+       }
+ 
+       @Override
+       public void run() {
+         try {
+           Admin admin = UTIL.getHBaseAdmin();
+           LOG.info("Submitting snapshot request: " + 
ClientSnapshotDescriptionUtils.toString(ss));
+           admin.takeSnapshotAsync(ss);
+         } catch (Exception e) {
+           LOG.info("Exception during snapshot request: " + 
ClientSnapshotDescriptionUtils.toString(
+               ss)
+               + ".  This is ok, we expect some", e);
+         }
+         LOG.info("Submitted snapshot request: " + 
ClientSnapshotDescriptionUtils.toString(ss));
+         toBeSubmitted.countDown();
+       }
+     };
+ 
+     // build descriptions
+     SnapshotDescription[] descs = new SnapshotDescription[ssNum];
+     for (int i = 0; i < ssNum; i++) {
+       SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+       builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
+       builder.setName("ss"+i);
+       builder.setType(SnapshotDescription.Type.FLUSH);
+       descs[i] = builder.build();
+     }
+ 
+     // kick each off its own thread
+     for (int i = 0; i < ssNum; i++) {
+       new Thread(new SSRunnable(descs[i])).start();
+     }
+ 
+     // wait until all have been submitted
+     toBeSubmitted.await();
+ 
+     // loop until all are done.
+     while (true) {
+       int doneCount = 0;
+       for (SnapshotDescription ss : descs) {
+         try {
+           if (admin.isSnapshotFinished(ss)) {
+             doneCount++;
+           }
+         } catch (Exception e) {
+           LOG.warn("Got an exception when checking for snapshot " + 
ss.getName(), e);
+           doneCount++;
+         }
+       }
+       if (doneCount == descs.length) {
+         break;
+       }
+       Thread.sleep(100);
+     }
+ 
+     // dump for debugging
+     logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
+ 
+     List<SnapshotDescription> taken = admin.listSnapshots();
+     int takenSize = taken.size();
+     LOG.info("Taken " + takenSize + " snapshots:  " + taken);
+     assertTrue("We expect at least 1 request to be rejected because of we 
concurrently" +
+         " issued many requests", takenSize < ssNum && takenSize > 0);
+ 
+     // Verify that there's at least one snapshot per table
+     int t1SnapshotsCount = 0;
+     int t2SnapshotsCount = 0;
+     for (SnapshotDescription ss : taken) {
+       if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) {
+         t1SnapshotsCount++;
+       } else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) {
+         t2SnapshotsCount++;
+       }
+     }
+     assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 
0);
+     assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 
0);
+ 
+     UTIL.deleteTable(TABLE2_NAME);
+   }
+ 
+   private void logFSTree(Path root) throws IOException {
+     FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
+   }
+ 
+   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
+       throws IOException, InterruptedException {
+     Admin admin = UTIL.getHBaseAdmin();
+     // Wait until the expected number of regions is reached
+     long startTime = System.currentTimeMillis();
+     while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
+       // This may be flaky... if after 15sec the merge is not complete give up
+       // it will fail in the assertEquals(numRegionsAfterMerge).
+       if ((System.currentTimeMillis() - startTime) > 15000)
+         break;
+       Thread.sleep(100);
+     }
+     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
+   }
+ }
 -

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
index 0000000,70b4312..c5ec3ad
mode 000000,100644..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
@@@ -1,0 -1,159 +1,160 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hbase.snapshot;
+ 
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertTrue;
+ 
+ import java.io.IOException;
+ 
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.HBaseTestingUtility;
+ import org.apache.hadoop.hbase.HTableDescriptor;
+ import org.apache.hadoop.hbase.testclassification.SmallTests;
+ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+ import org.apache.hadoop.hbase.io.HFileLink;
+ import org.apache.hadoop.hbase.mob.MobConstants;
+ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+ import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+ import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils.SnapshotMock;
+ import org.apache.hadoop.hbase.util.FSTableDescriptors;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ 
+ /**
+  * Test the restore/clone operation from a file-system point of view.
+  */
+ @Category(SmallTests.class)
+ public class TestMobRestoreSnapshotHelper {
+   final Log LOG = LogFactory.getLog(getClass());
+ 
+   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ 
+   private Configuration conf;
+   private FileSystem fs;
+   private Path rootDir;
+ 
+   @Before
+   public void setup() throws Exception {
+     rootDir = TEST_UTIL.getDataTestDir("testRestore");
+     fs = TEST_UTIL.getTestFileSystem();
+     TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+     conf = TEST_UTIL.getConfiguration();
+     FSUtils.setRootDir(conf, rootDir);
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     fs.delete(TEST_UTIL.getDataTestDir(), true);
+   }
+ 
+   @Test
+   public void testRestore() throws IOException {
+     // Test a rolling-upgrade-like snapshot:
+     // half the regions write using the v1 format and the others use the v2 format.
+     SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+     SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot");
+     builder.addRegionV1();
+     builder.addRegionV2();
+     builder.addRegionV2();
+     builder.addRegionV1();
+     Path snapshotDir = builder.commit();
+     HTableDescriptor htd = builder.getTableDescriptor();
+     SnapshotDescription desc = builder.getSnapshotDescription();
+ 
+     // Test clone a snapshot
+     HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
+     testRestore(snapshotDir, desc, htdClone);
+     verifyRestore(rootDir, htd, htdClone);
+ 
+     // Test clone a clone ("link to link")
+     SnapshotDescription cloneDesc = SnapshotDescription.newBuilder()
+         .setName("cloneSnapshot")
+         .setTable("testtb-clone")
+         .build();
+     Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
+     HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
+     testRestore(cloneDir, cloneDesc, htdClone2);
+     verifyRestore(rootDir, htd, htdClone2);
+   }
+ 
+   private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
+       final HTableDescriptor htdClone) throws IOException {
+     String[] files = SnapshotTestingUtils.listHFileNames(fs,
+       FSUtils.getTableDir(rootDir, htdClone.getTableName()));
+     assertEquals(12, files.length);
+     for (int i = 0; i < files.length; i += 2) {
+       String linkFile = files[i];
+       String refFile = files[i+1];
+       assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
+       assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
+       assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
+       Path refPath = getReferredToFile(refFile);
+       LOG.debug("get reference name for file " + refFile + " = " + refPath);
 -      assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName()));
++      assertTrue(refPath.getName() + " should be a HFileLink",
++        HFileLink.isHFileLink(refPath.getName()));
+       assertEquals(linkFile, refPath.getName());
+     }
+   }
+ 
+   /**
+    * Execute the restore operation
+    * @param snapshotDir The snapshot directory to use as "restore source"
+    * @param sd The snapshot descriptor
+    * @param htdClone The HTableDescriptor of the table to restore/clone.
+    */
+   public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
+       final HTableDescriptor htdClone) throws IOException {
+     LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + 
snapshotDir);
+     FSUtils.logFileSystemState(fs, rootDir, LOG);
+ 
+     new FSTableDescriptors(conf).createTableDescriptor(htdClone);
+     RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
+     helper.restoreHdfsRegions();
+ 
+     LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" 
+ snapshotDir);
+     FSUtils.logFileSystemState(fs, rootDir, LOG);
+   }
+ 
+   /**
+    * Initialize the restore helper, based on the snapshot and table information provided.
+    */
+   private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
+       final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
+     ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+     MonitoredTask status = Mockito.mock(MonitoredTask.class);
+ 
+     SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
+     return new RestoreSnapshotHelper(conf, fs, manifest,
+       htdClone, rootDir, monitor, status);
+   }
+ 
+   private Path getReferredToFile(final String referenceName) {
+     Path fakeBasePath = new Path(new Path("table", "region"), "cf");
+     return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName));
+   }
+ }

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index a285eae,9938df7..597f665
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@@ -443,6 -440,32 +446,30 @@@ public class TestHBaseFsck 
    }
  
    /**
+    * Set up a clean table with a mob-enabled column.
+    *
+    * @param tablename The name of the table to be created.
+    * @throws Exception
+    */
+   void setupMobTable(TableName tablename) throws Exception {
+     HTableDescriptor desc = new HTableDescriptor(tablename);
+     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
+     hcd.setMobEnabled(true);
+     hcd.setMobThreshold(0);
+     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
+     createTable(TEST_UTIL, desc, SPLITS);
+ 
 -    tbl = (HTable) connection.getTable(tablename, tableExecutorService);
++    tbl = connection.getTable(tablename, tableExecutorService);
+     List<Put> puts = new ArrayList<Put>();
+     for (byte[] row : ROWKEYS) {
+       Put p = new Put(row);
+       p.add(FAM, Bytes.toBytes("val"), row);
+       puts.add(p);
+     }
+     tbl.put(puts);
 -    tbl.flushCommits();
+   }
+ 
+   /**
     * Counts the number of row to verify data loss or non-dataloss.
     */
    int countRows() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/493f36c8/src/main/asciidoc/_chapters/hbase_mob.adoc
----------------------------------------------------------------------
diff --cc src/main/asciidoc/_chapters/hbase_mob.adoc
index 0000000,cb1ce09..e843d0c
mode 000000,100644..100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@@ -1,0 -1,227 +1,236 @@@
+ ////
+ /**
+  *
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ ////
+ 
+ [[hbase_mob]]
+ == Storing Medium-sized Objects (MOB)
+ :doctype: book
+ :numbered:
+ :toc: left
+ :icons: font
+ :experimental:
+ :source-language: java
+ 
+ Data comes in many sizes, and saving all of your data in HBase, including binary
+ data such as images and documents, is ideal. While HBase can technically handle
+ binary objects with cells that are larger than 100 KB in size, HBase's normal
+ read and write paths are optimized for values smaller than 100 KB in size. When
+ HBase deals with large numbers of objects over this threshold, referred to here
+ as medium objects, or MOBs, performance is degraded due to write amplification
+ caused by splits and compactions. When using MOBs, ideally your objects will be
+ between 100 KB and 10 MB. HBase ***FIX_VERSION_NUMBER*** adds support
+ for better managing large numbers of MOBs while maintaining performance,
+ consistency, and low operational overhead. MOB support is provided by the work
+ done in link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339]. To
+ take advantage of MOB, you need to use <<hfilev3,HFile version 3>>. Optionally,
+ configure the MOB file reader's cache settings for each RegionServer (see
+ <<mob.cache.configure>>), then configure specific columns to hold MOB data.
+ Client code does not need to change to take advantage of HBase MOB support. The
+ feature is transparent to the client.
+ 
+ === Configuring Columns for MOB
+ 
+ You can configure columns to support MOB during table creation or alteration,
+ either in HBase Shell or via the Java API. The two relevant properties are the
+ boolean `IS_MOB` and the `MOB_THRESHOLD`, which is the number of bytes at which
+ an object is considered to be a MOB. Only `IS_MOB` is required. If you do not
+ specify the `MOB_THRESHOLD`, the default threshold value of 100 KB is used.
+ 
+ .Configure a Column for MOB Using HBase Shell
+ ====
+ ----
+ hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400}
+ hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400}
+ ----
+ ====
+ 
+ .Configure a Column for MOB Using the Java API
+ ====
+ [source,java]
+ ----
+ ...
+ HColumnDescriptor hcd = new HColumnDescriptor("f");
+ hcd.setMobEnabled(true);
+ ...
+ hcd.setMobThreshold(102400L);
+ ...
+ ----
+ ====
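+ 
+ For completeness, the following is a minimal sketch of creating a MOB-enabled
+ table through the Java API. The table name `t1` and family `f1` are only
+ illustrative, and error handling is omitted.
+ 
+ [source,java]
+ ----
+ Configuration conf = HBaseConfiguration.create();
+ try (Connection connection = ConnectionFactory.createConnection(conf);
+      Admin admin = connection.getAdmin()) {
+   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
+   HColumnDescriptor hcd = new HColumnDescriptor("f1");
+   hcd.setMobEnabled(true);       // IS_MOB => true
+   hcd.setMobThreshold(102400L);  // MOB_THRESHOLD => 100 KB
+   htd.addFamily(hcd);
+   admin.createTable(htd);
+ }
+ ----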
+ 
+ 
+ === Testing MOB
+ 
 -The utility `org.apache.hadoop.hbase.IntegrationTestIngestMOB` is provided to assist with testing the MOB feature. The utility is run as follows:
++The utility `org.apache.hadoop.hbase.IntegrationTestIngestMOB` is provided to assist with testing
++the MOB feature. The utility is run as follows:
+ [source,bash]
+ ----
+ $ sudo -u hbase hbase org.apache.hadoop.hbase.IntegrationTestIngestMOB \
+             -threshold 102400 \
+             -minMobDataSize 512 \
+             -maxMobDataSize 5120
+ ----
+ 
 -* `*threshold*` is the threshold at which cells are considered to be MOBs. The default is 1 kB, expressed in bytes.
 -* `*minMobDataSize*` is the minimum value for the size of MOB data. The default is 512 B, expressed in bytes.
 -* `*maxMobDataSize*` is the maximum value for the size of MOB data. The default is 5 kB, expressed in bytes.
++* `*threshold*` is the threshold at which cells are considered to be MOBs.
++   The default is 1 kB, expressed in bytes.
++* `*minMobDataSize*` is the minimum value for the size of MOB data.
++   The default is 512 B, expressed in bytes.
++* `*maxMobDataSize*` is the maximum value for the size of MOB data.
++   The default is 5 kB, expressed in bytes.
+ 
+ 
+ [[mob.cache.configure]]
+ === Configuring the MOB Cache
+ 
+ 
 -Because there can be a large number of MOB files at any time, as compared to the number of HFiles, MOB files are not always kept open. The MOB file reader cache is a LRU cache which keeps the most recently used MOB files open. To configure the MOB file reader's cache on each RegionServer, add the following properties to the RegionServer's `hbase-site.xml`, customize the configuration to suit your environment, and restart or rolling restart the RegionServer.
++Because there can be a large number of MOB files at any time, as compared to the number of HFiles,
++MOB files are not always kept open. The MOB file reader cache is an LRU cache which keeps the most
++recently used MOB files open. To configure the MOB file reader's cache on each RegionServer, add
++the following properties to the RegionServer's `hbase-site.xml`, customize the configuration to
++suit your environment, and restart or rolling restart the RegionServer.
+ 
+ .Example MOB Cache Configuration
+ ====
+ [source,xml]
+ ----
+ <property>
+     <name>hbase.mob.file.cache.size</name>
+     <value>1000</value>
+     <description>
+       Number of opened file handlers to cache.
+       A larger value will benefit reads by providing more file handlers per mob
+       file cache and would reduce frequent file opening and closing.
+       However, if this is set too high, it could lead to a "too many opened file
+       handlers" error. The default value is 1000.
+     </description>
+ </property>
+ <property>
+     <name>hbase.mob.cache.evict.period</name>
+     <value>3600</value>
+     <description>
+       The amount of time in seconds after which an unused file is evicted from the
+       MOB cache. The default value is 3600 seconds.
+     </description>
+ </property>
+ <property>
+     <name>hbase.mob.cache.evict.remain.ratio</name>
+     <value>0.5f</value>
+     <description>
+       A multiplier (between 0.0 and 1.0) that determines the portion of files which
+       remain cached after an eviction, which is triggered by reaching the
+       `hbase.mob.file.cache.size` threshold.
+       The default value is 0.5f, which means that half the files (the
+       least-recently-used ones) are evicted.
+     </description>
+ </property>
+ ----
+ ====
+ 
+ === MOB Optimization Tasks
+ 
+ ==== Manually Compacting MOB Files
+ 
+ To manually compact MOB files, rather than waiting for the
+ <<mob.cache.configure,configuration>> to trigger compaction, use the
+ `compact_mob` or `major_compact_mob` HBase shell commands. These commands
+ require the first argument to be the table name, and take an optional column
+ family as the second argument. If the column family is omitted, all MOB-enabled
+ column families are compacted.
+ 
+ ----
+ hbase> compact_mob 't1', 'c1'
+ hbase> compact_mob 't1'
+ hbase> major_compact_mob 't1', 'c1'
+ hbase> major_compact_mob 't1'
+ ----
+ 
+ These commands are also available via `Admin.compactMob` and
+ `Admin.majorCompactMob` methods.
+ 
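+ For example, a short sketch of invoking the major MOB compaction from Java,
+ assuming an already-open `Connection` named `connection` and a MOB-enabled
+ family `f1` on table `t1`:
+ 
+ [source,java]
+ ----
+ try (Admin admin = connection.getAdmin()) {
+   // Major-compact only the MOB files of the given column family.
+   admin.majorCompactMob(TableName.valueOf("t1"), Bytes.toBytes("f1"));
+ }
+ ----
+ 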
+ ==== MOB Sweeper
+ 
+ HBase MOB includes a MapReduce job called the Sweeper tool for
+ optimization. The Sweeper tool coalesces small MOB files or MOB files with many
+ deletions or updates. The Sweeper tool is not required if you use native MOB
+ compaction, which does not rely on MapReduce.
+ 
+ To configure the Sweeper tool, set the following options:
+ 
+ [source,xml]
+ ----
+ <property>
+     <name>hbase.mob.sweep.tool.compaction.ratio</name>
+     <value>0.5f</value>
+     <description>
+       If there are too many cells deleted in a mob file, it's regarded
+       as an invalid file and needs to be merged.
+       If existingCellsSize/mobFileSize is less than ratio, it's regarded
+       as an invalid file. The default value is 0.5f.
+     </description>
+ </property>
+ <property>
+     <name>hbase.mob.sweep.tool.compaction.mergeable.size</name>
+     <value>134217728</value>
+     <description>
+       If the size of a mob file is less than this value, it's regarded as a 
small
+       file and needs to be merged. The default value is 128MB.
+     </description>
+ </property>
+ <property>
+     <name>hbase.mob.sweep.tool.compaction.memstore.flush.size</name>
+     <value>134217728</value>
+     <description>
+       The flush size for the memstore used by the sweep job. Each sweep reducer
+       owns such a memstore.
+       The default value is 128MB.
+     </description>
+ </property>
+ <property>
+     <name>hbase.master.mob.ttl.cleaner.period</name>
+     <value>86400</value>
+     <description>
+       The period, in seconds, at which the ExpiredMobFileCleanerChore runs.
+       The default value is one day.
+     </description>
+ </property>
+ ----
+ 
 -Next, add the HBase install directory, _`$HBASE_HOME`/*_, and HBase library directory to _yarn-site.xml_ Adjust this example to suit your environment.
++Next, add the HBase install directory, _`$HBASE_HOME`/*_, and HBase library
++directory to _yarn-site.xml_. Adjust this example to suit your environment.
+ [source,xml]
+ ----
+ <property>
+     <description>Classpath for typical applications.</description>
+     <name>yarn.application.classpath</name>
+     <value>
+         $HADOOP_CONF_DIR,
+         $HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
+         $HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
+         $HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
+         $HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*,
+         $HBASE_HOME/*, $HBASE_HOME/lib/*
+     </value>
+ </property>
+ ----
+ 
+ Finally, run the Sweeper tool for each column family which is configured for MOB.
+ [source,bash]
+ ----
+ $ hbase org.apache.hadoop.hbase.mob.compactions.Sweeper _tableName_ _familyName_
+ ----
