[1/2] hbase git commit: HBASE-14635 Fix flaky test TestSnapshotCloneIndependence

2016-05-25 Thread mbertozzi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a049b5f79 -> 7af013b26
  refs/heads/master b3362ccb0 -> 94696d406


HBASE-14635 Fix flaky test TestSnapshotCloneIndependence

Signed-off-by: Matteo Bertozzi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/94696d40
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/94696d40
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/94696d40

Branch: refs/heads/master
Commit: 94696d4067165b09818615809a10e609321e7f22
Parents: b3362cc
Author: Apekshit 
Authored: Wed May 25 16:07:22 2016 -0700
Committer: Matteo Bertozzi 
Committed: Wed May 25 21:14:06 2016 -0700

--
 .../client/TestSnapshotCloneIndependence.java   | 368 +++
 1 file changed, 141 insertions(+), 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/94696d40/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index 002e04e..4a1d7f5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
 import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,37 +33,55 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.TestRestoreFlushSnapshotFromClient;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
 
 /**
  * Test to verify that the cloned table is independent of the table from which 
it was cloned
  */
-@Category({LargeTests.class, ClientTests.class})
+@Category({MediumTests.class, ClientTests.class})
 public class TestSnapshotCloneIndependence {
   private static final Log LOG = 
LogFactory.getLog(TestSnapshotCloneIndependence.class);
 
+  @Rule
+  public Timeout globalTimeout = Timeout.seconds(60);
+
+  @Rule
+  public TestName testName = new TestName();
+
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
   protected static final int NUM_RS = 2;
   private static final String STRING_TABLE_NAME = "test";
   private static final String TEST_FAM_STR = "fam";
   protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
-  protected static final TableName TABLE_NAME = 
TableName.valueOf(STRING_TABLE_NAME);
   private static final int CLEANER_INTERVAL = 100;
 
+  private FileSystem fs;
+  private Path rootDir;
+  private Admin admin;
+  private TableName originalTableName;
+  private Table originalTable;
+  private TableName cloneTableName;
+  private int countOriginalTable;
+  String snapshotNameAsString;
+  byte[] snapshotName;
+
   /**
* Setup the config for the cluster and start it
-   * @throws Exception on fOailure
*/
   @BeforeClass
   public static void setupCluster() throws Exception {
@@ -104,12 +121,25 @@ public class TestSnapshotCloneIndependence {
 
   @Before
   public void setup() throws Exception {
-createTable(TABLE_NAME, TEST_FAM);
+fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+
+admin = UTIL.getHBaseAdmin();
+originalTableName = TableName.valueOf("test" + testName.getMethodName());
+cloneTableName = TableName.valueOf("test-clone-" + originalTableName);
+snapshotNameAsString = "snapshot_" + originalTableName;
+snapshotName = Bytes.toBytes(snapshotNameAsString);
+
+originalTable = createTable(originalTableName, TEST_FAM);
+loadData(originalTable, TEST_FAM);
+countOriginalTable = countRows(originalTable);
+System.out.println("Original table 

[2/2] hbase git commit: HBASE-14635 Fix flaky test TestSnapshotCloneIndependence

2016-05-25 Thread mbertozzi
HBASE-14635 Fix flaky test TestSnapshotCloneIndependence

Signed-off-by: Matteo Bertozzi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7af013b2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7af013b2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7af013b2

Branch: refs/heads/branch-1
Commit: 7af013b26747c5da3dd8000ae1f674e24811edf4
Parents: a049b5f
Author: Apekshit 
Authored: Wed May 25 16:07:22 2016 -0700
Committer: Matteo Bertozzi 
Committed: Wed May 25 21:24:09 2016 -0700

--
 .../client/TestSnapshotCloneIndependence.java   | 368 +++
 1 file changed, 141 insertions(+), 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7af013b2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index f21fcfc..31ab944 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
 import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,38 +33,56 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.TestRestoreFlushSnapshotFromClient;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
 
 /**
  * Test to verify that the cloned table is independent of the table from which 
it was cloned
  */
-@Category({LargeTests.class, ClientTests.class})
+@Category({MediumTests.class, ClientTests.class})
 public class TestSnapshotCloneIndependence {
   private static final Log LOG = 
LogFactory.getLog(TestSnapshotCloneIndependence.class);
 
+  @Rule
+  public Timeout globalTimeout = Timeout.seconds(60);
+
+  @Rule
+  public TestName testName = new TestName();
+
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
   protected static final int NUM_RS = 2;
   private static final String STRING_TABLE_NAME = "test";
   private static final String TEST_FAM_STR = "fam";
   protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
-  protected static final TableName TABLE_NAME = 
TableName.valueOf(STRING_TABLE_NAME);
   private static final int CLEANER_INTERVAL = 100;
 
+  private FileSystem fs;
+  private Path rootDir;
+  private Admin admin;
+  private TableName originalTableName;
+  private Table originalTable;
+  private TableName cloneTableName;
+  private int countOriginalTable;
+  String snapshotNameAsString;
+  byte[] snapshotName;
+
   /**
* Setup the config for the cluster and start it
-   * @throws Exception on fOailure
*/
   @BeforeClass
   public static void setupCluster() throws Exception {
@@ -105,12 +122,25 @@ public class TestSnapshotCloneIndependence {
 
   @Before
   public void setup() throws Exception {
-createTable(TABLE_NAME, TEST_FAM);
+fs = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+rootDir = 
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+
+admin = UTIL.getHBaseAdmin();
+originalTableName = TableName.valueOf("test" + testName.getMethodName());
+cloneTableName = TableName.valueOf("test-clone-" + originalTableName);
+snapshotNameAsString = "snapshot_" + originalTableName;
+snapshotName = Bytes.toBytes(snapshotNameAsString);
+
+originalTable = createTable(originalTableName, TEST_FAM);
+loadData(originalTable, TEST_FAM);
+countOriginalTable = countRows(originalTable);
+System.out.println("Original table has: " + countOriginalTable + " rows");
   }
 
   @After
   public void tearDown() throws Exceptio

hbase git commit: HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse scan (Sergey Soldatov)

2016-05-25 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b2e8a4158 -> f6e145e23


HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse 
scan (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6e145e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6e145e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6e145e2

Branch: refs/heads/branch-1.3
Commit: f6e145e23d747d42892f24bc7766400a2e852d1b
Parents: b2e8a41
Author: tedyu 
Authored: Wed May 25 17:31:42 2016 -0700
Committer: tedyu 
Committed: Wed May 25 17:31:42 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f6e145e2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 4055188..77857b0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -234,9 +234,9 @@ public class StoreFileScanner implements KeyValueScanner {
 while(enforceMVCC
 && cur != null
 && (cur.getMvccVersion() > readPt)) {
-  hfs.next();
+  boolean hasNext = hfs.next();
   setCurrentCell(hfs.getKeyValue());
-  if (this.stopSkippingKVsIfNextRow
+  if (hasNext && this.stopSkippingKVsIfNextRow
   && getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(),
   cur.getRowLength(), startKV.getRowArray(), 
startKV.getRowOffset(),
   startKV.getRowLength()) > 0) {



hbase git commit: HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse scan (Sergey Soldatov)

2016-05-25 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 2346b5e21 -> a049b5f79


HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse 
scan (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a049b5f7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a049b5f7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a049b5f7

Branch: refs/heads/branch-1
Commit: a049b5f799daa50bd658c8f11fd18275666e3927
Parents: 2346b5e
Author: tedyu 
Authored: Wed May 25 17:29:32 2016 -0700
Committer: tedyu 
Committed: Wed May 25 17:29:32 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a049b5f7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 0608756..02a4cae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -257,9 +257,9 @@ public class StoreFileScanner implements KeyValueScanner {
 while(enforceMVCC
 && cur != null
 && (cur.getMvccVersion() > readPt)) {
-  hfs.next();
+  boolean hasNext = hfs.next();
   setCurrentCell(hfs.getKeyValue());
-  if (this.stopSkippingKVsIfNextRow
+  if (hasNext && this.stopSkippingKVsIfNextRow
   && getComparator().compareRows(cur.getRowArray(), cur.getRowOffset(),
   cur.getRowLength(), startKV.getRowArray(), 
startKV.getRowOffset(),
   startKV.getRowLength()) > 0) {



hbase git commit: HBASE-15471 Added in Priority, General, and Replication queue sizes for RegionServers to the Web UI

2016-05-25 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1 627b48b79 -> 2346b5e21


HBASE-15471 Added in Priority, General, and Replication queue sizes for 
RegionServers to the Web UI

Changed UI labels so that queue "size" refers to size in bytes and queue 
"length" refers to number of items in queue.

Signed-off-by: Elliott Clark 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2346b5e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2346b5e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2346b5e2

Branch: refs/heads/branch-1
Commit: 2346b5e2169c802ed77554f4a8a79cd97c07ed3f
Parents: 627b48b
Author: Joseph Hwang 
Authored: Tue May 17 15:52:26 2016 -0700
Committer: Elliott Clark 
Committed: Wed May 25 15:15:01 2016 -0700

--
 .../hbase/tmpl/regionserver/ServerMetricsTmpl.jamon | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2346b5e2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 29f0791..2305f2e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -191,14 +191,20 @@ MetricsHBaseServerWrapper mServerWrap;
 
 
 
-Compaction Queue Size
-Flush Queue Size
-Call Queue Size (bytes)
+Compaction Queue Length
+Flush Queue Length
+Priority Call Queue Length
+General Call Queue Length
+Replication Call Queue Length
+Total Call Queue Size (bytes)
 
 
 
 <% mWrap.getCompactionQueueSize() %>
 <% mWrap.getFlushQueueSize() %>
+<% mServerWrap.getPriorityQueueLength() %>
+<% mServerWrap.getGeneralQueueLength() %>
+<% mServerWrap.getReplicationQueueLength() %>
 <% 
TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(), "B", 1) 
%>
 
 



hbase git commit: HBASE-15471 Added in Priority, General, and Replication queue sizes for RegionServers to the Web UI

2016-05-25 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/master fa74baeb4 -> b3362ccb0


HBASE-15471 Added in Priority, General, and Replication queue sizes for 
RegionServers to the Web UI

Changed UI labels so that queue "size" refers to size in bytes and queue 
"length" refers to number of items in queue.

Signed-off-by: Elliott Clark 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3362ccb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3362ccb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3362ccb

Branch: refs/heads/master
Commit: b3362ccb0c35a493a8a97fa5593650bb08b736f9
Parents: fa74bae
Author: Joseph Hwang 
Authored: Tue May 17 15:52:26 2016 -0700
Committer: Elliott Clark 
Committed: Wed May 25 15:10:48 2016 -0700

--
 .../hbase/tmpl/regionserver/ServerMetricsTmpl.jamon | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3362ccb/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index f5aa478..82cb4e7 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -193,14 +193,20 @@ MetricsHBaseServerWrapper mServerWrap;
 
 
 
-Compaction Queue Size
-Flush Queue Size
-Call Queue Size (bytes)
+Compaction Queue Length
+Flush Queue Length
+Priority Call Queue Length
+General Call Queue Length
+Replication Call Queue Length
+Total Call Queue Size (bytes)
 
 
 
 <% mWrap.getCompactionQueueSize() %>
 <% mWrap.getFlushQueueSize() %>
+<% mServerWrap.getPriorityQueueLength() %>
+<% mServerWrap.getGeneralQueueLength() %>
+<% mServerWrap.getReplicationQueueLength() %>
 <% 
TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(), "B", 1) 
%>
 
 



hbase git commit: HBASE-15471 Added in Priority, General, and Replication queue sizes for RegionServers to the Web UI

2016-05-25 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 37e080d70 -> b2e8a4158


HBASE-15471 Added in Priority, General, and Replication queue sizes for 
RegionServers to the Web UI

Changed UI labels so that queue "size" refers to size in bytes and queue 
"length" refers to number of items in queue.

Signed-off-by: Elliott Clark 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2e8a415
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2e8a415
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2e8a415

Branch: refs/heads/branch-1.3
Commit: b2e8a415887e22decd4191876b34f1875b987776
Parents: 37e080d
Author: Joseph Hwang 
Authored: Tue May 17 15:52:26 2016 -0700
Committer: Elliott Clark 
Committed: Wed May 25 15:15:11 2016 -0700

--
 .../hbase/tmpl/regionserver/ServerMetricsTmpl.jamon | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2e8a415/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 29f0791..2305f2e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -191,14 +191,20 @@ MetricsHBaseServerWrapper mServerWrap;
 
 
 
-Compaction Queue Size
-Flush Queue Size
-Call Queue Size (bytes)
+Compaction Queue Length
+Flush Queue Length
+Priority Call Queue Length
+General Call Queue Length
+Replication Call Queue Length
+Total Call Queue Size (bytes)
 
 
 
 <% mWrap.getCompactionQueueSize() %>
 <% mWrap.getFlushQueueSize() %>
+<% mServerWrap.getPriorityQueueLength() %>
+<% mServerWrap.getGeneralQueueLength() %>
+<% mServerWrap.getReplicationQueueLength() %>
 <% 
TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(), "B", 1) 
%>
 
 



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-25 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 921ecef38 -> db7d17c89


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/db7d17c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/db7d17c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/db7d17c8

Branch: refs/heads/branch-1.1
Commit: db7d17c897a064be342751a8f7092fc32cee1048
Parents: 921ecef
Author: Mikhail Antonov 
Authored: Mon May 23 12:51:44 2016 -0700
Committer: Mikhail Antonov 
Committed: Wed May 25 13:51:27 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/db7d17c8/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 915b2b5..0288c39 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -861,7 +861,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  TraceScope ts = Trace.continueSpan(span);
+  TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", 
span);
   try {
 writeRequest(call, priority, span);
   } finally {



[1/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 27c7cf6bf -> 82735499a


http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
new file mode 100644
index 000..3630d87
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupAdmin extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupAdmin.class);
+  //implement all test cases in 1 test since incremental backup/restore has 
dependencies
+  @Test
+  public void TestIncBackupRestoreWithAdminAPI() throws Exception {
+// #1 - create full backup for all tables
+LOG.info("create full backup image for all tables");
+
+List tables = Lists.newArrayList(table1, table2, table3, 
table4);
+HBaseAdmin admin = null;
+BackupAdmin backupAdmin = null;
+Connection conn = ConnectionFactory.createConnection(conf1);
+admin = (HBaseAdmin) conn.getAdmin();
+backupAdmin =  admin.getBackupAdmin();
+BackupRequest request = new BackupRequest();
+
request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
+String backupIdFull = backupAdmin.backupTables(request);
+
+assertTrue(checkSucceeded(backupIdFull));
+
+// #2 - insert some data to table
+HTable t1 = (HTable) conn.getTable(table1);
+Put p1;
+for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+  p1 = new Put(Bytes.toBytes("row-t1" + i));
+  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t1.put(p1);
+}
+
+Assert.assertThat(TEST_UTIL.countRows(t1), 
CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+t1.close();
+
+HTable t2 =  (HTable) conn.getTable(table2);
+Put p2;
+for (int i = 0; i < 5; i++) {
+  p2 = new Put(Bytes.toBytes("row-t2" + i));
+  p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t2.put(p2);
+}
+
+Assert.assertThat(TEST_UTIL.countRows(t2), 
CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+t2.close();
+
+// #3 - incremental backup for multiple tables
+tables = Lists.newArrayList(table1, table2, table3);
+request = new BackupRequest();
+request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+.setTargetRootDir(BACKUP_ROOT_DIR);
+String backupIdIncMultiple = backupAdmin.backupTables(request);
+assertTrue(checkSucceeded(backupIdIncMultiple));
+
+// #4 - restore full backup for all tables, without overwrite
+TableName[] tablesRestoreFull =
+new TableName[] { table1, table2, table3, table4 };
+
+TableName[] tablesMapFull =
+new TableName[] { table1_restore, table2_restore, table3_restore, 
table4_restore };
+
+RestoreRequest restoreRequest = new RestoreRequest();
+restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdFull).
+  setCheck(false).setAutorestore(false).setOverwrite(false).
+  setFromTables(tablesRestoreFull).setToTables(tablesMapFull);
+ 

[5/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 000..777d916
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,807 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+
+/**
+ * This class provides 'hbase:backup' table API
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupSystemTable implements Closeable {
+  
+  static class WALItem {
+String backupId;
+String walFile;
+String backupRoot;
+
+WALItem(String backupId, String walFile, String backupRoot)
+{
+  this.backupId = backupId;
+  this.walFile = walFile;
+  this.backupRoot = backupRoot;
+}
+
+public String getBackupId() {
+  return backupId;
+}
+
+public String getWalFile() {
+  return walFile;
+}
+
+public String getBackupRoot() {
+  return backupRoot;
+}
+
+public String toString() {
+  return "/"+ backupRoot + "/"+backupId + "/" + walFile;
+}
+
+  }
+  
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+  private final static TableName tableName = TableName.BACKUP_TABLE_NAME;  
+  // Stores backup sessions (contexts)
+  final static byte[] SESSIONS_FAMILY = "session".getBytes();
+  // Stores other meta 
+  final static byte[] META_FAMILY = "meta".getBytes();
+  // Connection to HBase cluster, shared
+  // among all instances
+  private final Connection connection;
+
+  public BackupSystemTable(Connection conn) throws IOException {
+this.connection = conn;
+  }
+
+ 
+  public void close() {
+ // do nothing 
+  }
+
+  /**
+   * Updates status (state) of a backup session in hbase:backup table
+   * @param context context
+   * @throws IOException exception
+   */
+  public void updateBackupInfo(BackupInfo context) throws IOException {
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("update backup status in hbase:backup for: " + 
context.getBackupId()
++ " set status=" + context.getState());
+}
+try (Table table = connection.getTable(tableName)) {
+  Put put = BackupSystemTableHelper.createPutForBackupContext(context);
+  table.put(put);
+}
+  }
+
+  /

[6/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include 
backup/restore - related API (Vladimir)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/82735499
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/82735499
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/82735499

Branch: refs/heads/HBASE-7912
Commit: 82735499a73c0aba2c53a22776e5e54abc5e0eb1
Parents: 27c7cf6
Author: tedyu 
Authored: Wed May 25 13:18:44 2016 -0700
Committer: tedyu 
Committed: Wed May 25 13:18:44 2016 -0700

--
 .../hadoop/hbase/backup/BackupClientUtil.java   | 165 
 .../apache/hadoop/hbase/backup/BackupInfo.java  | 485 +++
 .../backup/BackupRestoreClientFactory.java  |  55 ++
 .../hadoop/hbase/backup/BackupStatus.java   | 104 +++
 .../hadoop/hbase/backup/RestoreClient.java  |  49 ++
 .../hadoop/hbase/backup/RestoreRequest.java | 108 +++
 .../hbase/backup/impl/BackupCommands.java   | 551 +
 .../hbase/backup/impl/BackupException.java  |  86 ++
 .../backup/impl/BackupRestoreConstants.java |  47 ++
 .../hbase/backup/impl/BackupSystemTable.java| 807 +++
 .../backup/impl/BackupSystemTableHelper.java| 428 ++
 .../hbase/backup/util/BackupClientUtil.java | 363 +
 .../hadoop/hbase/backup/util/BackupSet.java |  62 ++
 .../org/apache/hadoop/hbase/client/Admin.java   |  35 +-
 .../apache/hadoop/hbase/client/BackupAdmin.java | 155 
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  24 +-
 .../hadoop/hbase/client/HBaseBackupAdmin.java   | 221 +
 .../hadoop/hbase/backup/BackupClient.java   | 108 ---
 .../hadoop/hbase/backup/BackupCopyService.java  |  56 ++
 .../hadoop/hbase/backup/BackupDriver.java   |   4 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  | 484 ---
 .../hbase/backup/BackupRestoreFactory.java  |  98 ---
 .../backup/BackupRestoreServerFactory.java  |  65 ++
 .../hadoop/hbase/backup/BackupStatus.java   | 103 ---
 .../hbase/backup/IncrementalRestoreService.java |  42 +
 .../hadoop/hbase/backup/RestoreClient.java  |  46 --
 .../hadoop/hbase/backup/RestoreDriver.java  |  18 +-
 .../hbase/backup/impl/BackupClientImpl.java | 231 --
 .../hbase/backup/impl/BackupCommands.java   | 559 -
 .../hbase/backup/impl/BackupCopyService.java|  56 --
 .../hbase/backup/impl/BackupException.java  |  86 --
 .../hbase/backup/impl/BackupManifest.java   |   6 +-
 .../backup/impl/BackupRestoreConstants.java |  47 --
 .../hbase/backup/impl/BackupSystemTable.java| 802 --
 .../backup/impl/BackupSystemTableHelper.java| 423 --
 .../hadoop/hbase/backup/impl/BackupUtil.java| 472 ---
 .../backup/impl/IncrementalBackupManager.java   |  11 +-
 .../backup/impl/IncrementalRestoreService.java  |  42 -
 .../hbase/backup/impl/RestoreClientImpl.java|  19 +-
 .../hadoop/hbase/backup/impl/RestoreUtil.java   | 608 --
 .../mapreduce/MapReduceBackupCopyService.java   |   6 +-
 .../mapreduce/MapReduceRestoreService.java  |   6 +-
 .../hbase/backup/master/BackupLogCleaner.java   |   2 +-
 .../backup/master/FullTableBackupProcedure.java |  18 +-
 .../master/IncrementalTableBackupProcedure.java |  16 +-
 .../hbase/backup/util/BackupServerUtil.java | 477 +++
 .../hadoop/hbase/backup/util/BackupSet.java |  62 --
 .../hbase/backup/util/RestoreServerUtil.java| 609 ++
 .../hadoop/hbase/HBaseTestingUtility.java   |  10 +
 .../hadoop/hbase/backup/TestBackupAdmin.java| 186 +
 .../hadoop/hbase/backup/TestBackupBase.java |  42 +-
 .../hadoop/hbase/backup/TestBackupDelete.java   |  12 +-
 .../hadoop/hbase/backup/TestBackupDescribe.java |   2 +-
 .../hbase/backup/TestBackupShowHistory.java |   2 +-
 .../hbase/backup/TestBackupStatusProgress.java  |   4 +-
 .../hbase/backup/TestBackupSystemTable.java |   8 +-
 .../hadoop/hbase/backup/TestFullBackup.java |  24 +-
 .../hadoop/hbase/backup/TestFullRestore.java|  47 +-
 .../hbase/backup/TestIncrementalBackup.java |  26 +-
 .../backup/TestIncrementalBackupNoDataLoss.java |  12 +-
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   4 +-
 .../hbase/backup/TestRestoreBoundaryTests.java  |   8 +-
 .../hbase/client/TestMetaWithReplicas.java  |   3 +
 .../TestMasterOperationsForRegionReplicas.java  |   7 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   4 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java   |   2 +-
 .../util/hbck/TestOfflineMetaRebuildBase.java   |   4 +-
 .../util/hbck/TestOfflineMetaRebuildHole.java   |   4 +-
 .../hbck/TestOfflineMetaRebuildOverlap.java |   4 +-
 69 files changed, 5165 insertions(+), 4577 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/827

[4/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
deleted file mode 100644
index 784cff6..000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ /dev/null
@@ -1,484 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
-import 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder;
-import 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
-
-
-/**
- * An object to encapsulate the information for each backup request
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class BackupInfo implements Comparable {
-  private static final Log LOG = LogFactory.getLog(BackupInfo.class);
-  // backup status flag
-  public static enum BackupState {
-WAITING, RUNNING, COMPLETE, FAILED, CANCELLED;
-  }
-  // backup phase
-  public static enum BackupPhase {
-SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
-  }
-
-  // backup id: a timestamp when we request the backup
-  private String backupId;
-
-  // backup type, full or incremental
-  private BackupType type;
-
-  // target root directory for storing the backup files
-  private String targetRootDir;
-
-  // overall backup state
-  private BackupState state;
-
-  // overall backup phase
-  private BackupPhase phase;
-
-  // overall backup failure message
-  private String failedMsg;
-
-  // backup status map for all tables
-  private Map backupStatusMap;
-
-  // actual start timestamp of the backup process
-  private long startTs;
-
-  // actual end timestamp of the backup process, could be fail or complete
-  private long endTs;
-
-  // the total bytes of incremental logs copied
-  private long totalBytesCopied;
-
-  // for incremental backup, the location of the backed-up hlogs
-  private String hlogTargetDir = null;
-
-  // incremental backup file list
-  transient private List incrBackupFileList;
-
-  // new region server log timestamps for table set after distributed log roll
-  // key - table name, value - map of RegionServer hostname -> last log rolled 
timestamp
-  transient private HashMap> 
tableSetTimestampMap;
-
-  // backup progress in % (0-100)
-  private int progress;
-  
-  // distributed job id
-  private String jobId;
-  
-  // Number of parallel workers. -1 - system defined
-  private int workers = -1;
- 
-  // Bandwidth per worker in MB per sec. -1 - unlimited
-  private long bandwidth = -1;  
-  
-  public BackupInfo() {
-  }
-
-  public BackupInfo(String backupId, BackupType type, TableName[] tables, 
String targetRootDir) {
-backupStatusMap = new HashMap();
-
-this.backupId = backupId;
-this.type = type;
-this.targetRootDir = targetRootDir;
-if(LOG.isDebugEnabled()){
-  LOG.debug("CreateBackupContext: " + tables.length+" "+tables[0] );
-}
-this.addTables(tables);
-
-if (type == BackupType.INCREMENTAL) {
-  setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, 
backupId));
-}
-
-this.startTs = 0;
-this.endTs = 0;
-  }
-
-  public String getJobId() {
-return jobId;
-  }
-
-  public void setJobId(String jobId) {
-t

[3/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
deleted file mode 100644
index 8c3c2be..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ /dev/null
@@ -1,802 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
-
-/**
- * This class provides 'hbase:backup' table API
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupSystemTable implements Closeable {
-  
-  static class WALItem {
-String backupId;
-String walFile;
-String backupRoot;
-
-WALItem(String backupId, String walFile, String backupRoot)
-{
-  this.backupId = backupId;
-  this.walFile = walFile;
-  this.backupRoot = backupRoot;
-}
-
-public String getBackupId() {
-  return backupId;
-}
-
-public String getWalFile() {
-  return walFile;
-}
-
-public String getBackupRoot() {
-  return backupRoot;
-}
-
-public String toString() {
-  return backupRoot+"/"+backupId + "/" + walFile;
-}
-
-  }
-  
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-  private final static TableName tableName = TableName.BACKUP_TABLE_NAME;  
-  // Stores backup sessions (contexts)
-  final static byte[] SESSIONS_FAMILY = "session".getBytes();
-  // Stores other meta 
-  final static byte[] META_FAMILY = "meta".getBytes();
-  // Connection to HBase cluster, shared
-  // among all instances
-  private final Connection connection;
-
-  public BackupSystemTable(Connection conn) throws IOException {
-this.connection = conn;
-  }
-
- 
-  public void close() {
- // do nothing 
-  }
-
-  /**
-   * Updates status (state) of a backup session in hbase:backup table
-   * @param context context
-   * @throws IOException exception
-   */
-  public void updateBackupInfo(BackupInfo context) throws IOException {
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("update backup status in hbase:backup for: " + 
context.getBackupId()
-+ " set status=" + context.getState());
-}
-try (Table table = connection.getTable(tableName)) {
-  Put put = BackupSystemTableHelper.createPutForBackupContext(context);
-  table.put(put);
-}
-  }
-
-  /**
-   * Deletes backup status from hbase:backup table
-   * @param backupId backup id
-   * @return true, if 

[2/6] hbase git commit: HBASE-14140 HBase Backup/Restore Phase 3: Enhance HBaseAdmin API to include backup/restore - related API (Vladimir)

2016-05-25 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/82735499/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java
deleted file mode 100644
index 592770b..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * A collection for methods used by multiple classes to restore HBase tables.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class RestoreUtil {
-
-  public static final Log LOG = LogFactory.getLog(RestoreUtil.class);
-
-  private final String[] ignoreDirs = { "recovered.edits" };
-
-  protected Configuration conf = null;
-
-  protected Path backupRootPath;
-
-  protected String backupId;
-
-  protected FileSystem fs;
-  private final String RESTORE_TMP_PATH = "/tmp";
-  private final Path restoreTmpPath;
-
-  // store table name and snapshot dir mapping
-  private final HashMap snapshotMap = new HashMap<>();
-
-  public RestoreUtil(Configuration conf, final Path backupRootPath, final 
String backupId)
-  throws IOException {
-this.conf = conf;
-this.backupRootPath = backupRootPath;
-this.backupId = backupId;
-this.fs = backupRootPath.getFileSystem(conf);
-this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null?
-conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH,
-  "restore");
-  }
-
-  /**
-   * return value represent path for:
-   * 
".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
-   * @param tableName table name
-   * @return path to table archive
-   * @throws IOException exception
-   */
-  Path getTableArchivePath(TableName tableName)
-  throws IOException {
-Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(tableName, 
backupRootPath, 
-  backupId), HConstants.HFILE_ARCHIVE_DIRECTORY);
-Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
-Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
-Path tableArchivePath =
-new Path(archivePath, tableName.getQualifierAsString());
-if (!fs.exists(tableArchivePath) || 
!fs.getFileStatus(tableArchivePath).isDirecto

hbase git commit: HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse scan (Sergey Soldatov)

2016-05-25 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 39dc19236 -> fa74baeb4


HBASE-15884 NPE in StoreFileScanner#skipKVsNewerThanReadpoint during reverse 
scan (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fa74baeb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fa74baeb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fa74baeb

Branch: refs/heads/master
Commit: fa74baeb409778de71c8a92b115b46dc63f313a0
Parents: 39dc192
Author: tedyu 
Authored: Wed May 25 08:58:28 2016 -0700
Committer: tedyu 
Committed: Wed May 25 08:58:28 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fa74baeb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index abade0e..4955ffe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -253,9 +253,9 @@ public class StoreFileScanner implements KeyValueScanner {
 while(enforceMVCC
 && cur != null
 && (cur.getSequenceId() > readPt)) {
-  hfs.next();
+  boolean hasNext = hfs.next();
   setCurrentCell(hfs.getCell());
-  if (this.stopSkippingKVsIfNextRow
+  if (hasNext && this.stopSkippingKVsIfNextRow
   && getComparator().compareRows(cur, startKV) > 0) {
 return false;
   }