Repository: hbase
Updated Branches:
  refs/heads/master 1bc996aa5 -> ad5cd50df


http://git-wip-us.apache.org/repos/asf/hbase/blob/ad5cd50d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
deleted file mode 100644
index 88513ea..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ /dev/null
@@ -1,400 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HTestConst;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Category({ ReplicationTests.class, LargeTests.class })
-public class TestSerialReplication {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSerialReplication.class);
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestSerialReplication.class);
-
-  private static Configuration conf1;
-  private static Configuration conf2;
-
-  private static HBaseTestingUtility utility1;
-  private static HBaseTestingUtility utility2;
-
-  private static final byte[] famName = Bytes.toBytes("f");
-  private static final byte[] VALUE = Bytes.toBytes("v");
-  private static final byte[] ROW = Bytes.toBytes("r");
-  private static final byte[][] ROWS = HTestConst.makeNAscii(ROW, 100);
-
-  @Rule
-  public TestName name = new TestName();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    conf1 = HBaseConfiguration.create();
-    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
-    // smaller block size and capacity to trigger more operations
-    // and test them
-    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
-    conf1.setInt("replication.source.size.capacity", 1024);
-    conf1.setLong("replication.source.sleepforretries", 100);
-    conf1.setInt("hbase.regionserver.maxlogs", 10);
-    conf1.setLong("hbase.master.logcleaner.ttl", 10);
-    conf1.setBoolean("dfs.support.append", true);
-    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
-    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
-        "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
-    conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);// Each 
WAL is 120 bytes
-    conf1.setLong("replication.source.size.capacity", 1L);
-    conf1.setLong(HConstants.REPLICATION_SERIALLY_WAITING_KEY, 1000L);
-
-    utility1 = new HBaseTestingUtility(conf1);
-    utility1.startMiniZKCluster();
-    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
-    new ZKWatcher(conf1, "cluster1", null, true);
-
-    conf2 = new Configuration(conf1);
-    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
-
-    utility2 = new HBaseTestingUtility(conf2);
-    utility2.setZkCluster(miniZK);
-    new ZKWatcher(conf2, "cluster2", null, true);
-
-    utility1.startMiniCluster(1, 10);
-    utility2.startMiniCluster(1, 1);
-
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
-    admin1.addPeer("1", rpc, null);
-
-    utility1.getAdmin().setBalancerRunning(false, true);
-  }
-
-  @Test
-  public void testRegionMoveAndFailover() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor table = new HTableDescriptor(tableName);
-    HColumnDescriptor fam = new HColumnDescriptor(famName);
-    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
-    table.addFamily(fam);
-    utility1.getAdmin().createTable(table);
-    utility2.getAdmin().createTable(table);
-    try(Table t1 = utility1.getConnection().getTable(tableName);
-        Table t2 = utility2.getConnection().getTable(tableName)) {
-      LOG.info("move to 1");
-      moveRegion(t1, 1);
-      LOG.info("move to 0");
-      moveRegion(t1, 0);
-      for (int i = 10; i < 20; i++) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-      LOG.info("move to 2");
-      moveRegion(t1, 2);
-      for (int i = 20; i < 30; i++) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-      utility1.getHBaseCluster().abortRegionServer(2);
-      for (int i = 30; i < 40; i++) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-
-      long start = EnvironmentEdgeManager.currentTime();
-      while (EnvironmentEdgeManager.currentTime() - start < 180000) {
-        Scan scan = new Scan();
-        scan.setCaching(100);
-        List<Cell> list = new ArrayList<>();
-        try (ResultScanner results = t2.getScanner(scan)) {
-          for (Result result : results) {
-            assertEquals(1, result.rawCells().length);
-            list.add(result.rawCells()[0]);
-          }
-        }
-        List<Integer> listOfNumbers = getRowNumbers(list);
-        LOG.info(Arrays.toString(listOfNumbers.toArray()));
-        assertIntegerList(listOfNumbers, 10, 1);
-        if (listOfNumbers.size() != 30) {
-          LOG.info("Waiting all logs pushed to slave. Expected 30 , actual " + 
list.size());
-          Thread.sleep(200);
-          continue;
-        }
-        return;
-      }
-      throw new Exception("Not all logs have been pushed");
-    }
-  }
-
-  @Test
-  public void testRegionSplit() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor table = new HTableDescriptor(tableName);
-    HColumnDescriptor fam = new HColumnDescriptor(famName);
-    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
-    table.addFamily(fam);
-    utility1.getAdmin().createTable(table);
-    utility2.getAdmin().createTable(table);
-    try(Table t1 = utility1.getConnection().getTable(tableName);
-        Table t2 = utility2.getConnection().getTable(tableName)) {
-
-      for (int i = 10; i < 100; i += 10) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-      utility1.getAdmin().split(tableName, ROWS[50]);
-      waitTableHasRightNumberOfRegions(tableName, 2);
-      for (int i = 11; i < 100; i += 10) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-
-      long start = EnvironmentEdgeManager.currentTime();
-      while (EnvironmentEdgeManager.currentTime() - start < 180000) {
-        Scan scan = new Scan();
-        scan.setCaching(100);
-        List<Cell> list = new ArrayList<>();
-        try (ResultScanner results = t2.getScanner(scan)) {
-          for (Result result : results) {
-            assertEquals(1, result.rawCells().length);
-            list.add(result.rawCells()[0]);
-          }
-        }
-        List<Integer> listOfNumbers = getRowNumbers(list);
-        List<Integer> list1 = new ArrayList<>();
-        List<Integer> list21 = new ArrayList<>();
-        List<Integer> list22 = new ArrayList<>();
-        for (int num : listOfNumbers) {
-          if (num % 10 == 0) {
-            list1.add(num);
-          }else if (num < 50) { //num%10==1
-            list21.add(num);
-          } else { // num%10==1&&num>50
-            list22.add(num);
-          }
-        }
-
-        LOG.info(Arrays.toString(list1.toArray()));
-        LOG.info(Arrays.toString(list21.toArray()));
-        LOG.info(Arrays.toString(list22.toArray()));
-        assertIntegerList(list1, 10, 10);
-        assertIntegerList(list21, 11, 10);
-        assertIntegerList(list22, 51, 10);
-        if (!list21.isEmpty() || !list22.isEmpty()) {
-          assertEquals(9, list1.size());
-        }
-
-        if (list.size() == 18) {
-          return;
-        }
-        LOG.info("Waiting all logs pushed to slave. Expected 27 , actual " + 
list.size());
-        Thread.sleep(200);
-      }
-      throw new Exception("Not all logs have been pushed");
-    }
-  }
-
-  @Test
-  public void testRegionMerge() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor table = new HTableDescriptor(tableName);
-    HColumnDescriptor fam = new HColumnDescriptor(famName);
-    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
-    table.addFamily(fam);
-    utility1.getAdmin().createTable(table);
-    utility2.getAdmin().createTable(table);
-    Threads.sleep(5000);
-    utility1.getAdmin().split(tableName, ROWS[50]);
-    waitTableHasRightNumberOfRegions(tableName, 2);
-
-    try(Table t1 = utility1.getConnection().getTable(tableName);
-        Table t2 = utility2.getConnection().getTable(tableName)) {
-      for (int i = 10; i < 100; i += 10) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-      List<Pair<RegionInfo, ServerName>> regions =
-          MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
-      utility1.getAdmin().mergeRegionsAsync(regions.get(0).getFirst().getRegionName(),
-          regions.get(1).getFirst().getRegionName(), true);
-      waitTableHasRightNumberOfRegions(tableName, 1);
-      for (int i = 11; i < 100; i += 10) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-
-      long start = EnvironmentEdgeManager.currentTime();
-      while (EnvironmentEdgeManager.currentTime() - start < 180000) {
-        Scan scan = new Scan();
-        scan.setCaching(100);
-        List<Cell> list = new ArrayList<>();
-        try (ResultScanner results = t2.getScanner(scan)) {
-          for (Result result : results) {
-            assertEquals(1, result.rawCells().length);
-            list.add(result.rawCells()[0]);
-          }
-        }
-        List<Integer> listOfNumbers = getRowNumbers(list);
-        List<Integer> list0 = new ArrayList<>();
-        List<Integer> list1 = new ArrayList<>();
-        for (int num : listOfNumbers) {
-          if (num % 10 == 0) {
-            list0.add(num);
-          } else {
-            list1.add(num);
-          }
-        }
-        LOG.info(Arrays.toString(list0.toArray()));
-        LOG.info(Arrays.toString(list1.toArray()));
-        assertIntegerList(list1, 11, 10);
-        if (!list1.isEmpty()) {
-          assertEquals(9, list0.size());
-        }
-        if (list.size() == 18) {
-          return;
-        }
-        LOG.info("Waiting all logs pushed to slave. Expected 18 , actual " + 
list.size());
-        Thread.sleep(200);
-      }
-
-    }
-  }
-
-  private List<Integer> getRowNumbers(List<Cell> cells) {
-    List<Integer> listOfRowNumbers = new ArrayList<>(cells.size());
-    for (Cell c : cells) {
-      listOfRowNumbers.add(Integer.parseInt(Bytes
-          .toString(c.getRowArray(), c.getRowOffset() + ROW.length,
-              c.getRowLength() - ROW.length)));
-    }
-    return listOfRowNumbers;
-  }
-
-  @AfterClass
-  public static void setUpAfterClass() throws Exception {
-    utility2.shutdownMiniCluster();
-    utility1.shutdownMiniCluster();
-  }
-
-  private void moveRegion(Table table, int index) throws IOException {
-    List<Pair<RegionInfo, ServerName>> regions =
-        MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
-    assertEquals(1, regions.size());
-    RegionInfo regionInfo = regions.get(0).getFirst();
-    ServerName name = utility1.getHBaseCluster().getRegionServer(index).getServerName();
-    utility1.getAdmin()
-        .move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(name.getServerName()));
-    while (true) {
-      regions =
-          MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
-      if (regions.get(0).getSecond().equals(name)) {
-        break;
-      }
-      Threads.sleep(100);
-    }
-  }
-
-  private void balanceTwoRegions(Table table) throws Exception {
-    List<Pair<RegionInfo, ServerName>> regions =
-        MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
-    assertEquals(2, regions.size());
-    RegionInfo regionInfo1 = regions.get(0).getFirst();
-    ServerName name1 = utility1.getHBaseCluster().getRegionServer(0).getServerName();
-    RegionInfo regionInfo2 = regions.get(1).getFirst();
-    ServerName name2 = utility1.getHBaseCluster().getRegionServer(1).getServerName();
-    utility1.getAdmin()
-        .move(regionInfo1.getEncodedNameAsBytes(), Bytes.toBytes(name1.getServerName()));
-    utility1.getAdmin()
-        .move(regionInfo2.getEncodedNameAsBytes(), Bytes.toBytes(name2.getServerName()));
-    while (true) {
-      regions =
-          MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
-      if (regions.get(0).getSecond().equals(name1) && regions.get(1).getSecond().equals(name2)) {
-        break;
-      }
-      Threads.sleep(100);
-    }
-  }
-
-  private void waitTableHasRightNumberOfRegions(TableName tableName, int num) throws IOException {
-    while (true) {
-      List<Pair<RegionInfo, ServerName>> regions =
-          MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
-      if (regions.size() == num) {
-        return;
-      }
-      Threads.sleep(100);
-    }
-
-  }
-
-  private void assertIntegerList(List<Integer> list, int start, int step) {
-    int size = list.size();
-    for (int i = 0; i < size; i++) {
-      assertEquals(start + step * i, list.get(i).intValue());
-    }
-  }
-}
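
The core of the deleted test was its ordering assertion: rows replicated
serially had to arrive as an exact arithmetic progression. A condensed,
self-contained sketch of that check (the class name and main harness are
illustrative, not part of the removed file):

import java.util.Arrays;
import java.util.List;

public class OrderingCheckSketch {

  // Mirrors the deleted assertIntegerList: entry i must equal
  // start + step * i, otherwise edits were delivered out of order.
  static void assertIntegerList(List<Integer> list, int start, int step) {
    for (int i = 0; i < list.size(); i++) {
      int expected = start + step * i;
      if (list.get(i) != expected) {
        throw new AssertionError("index " + i + ": expected " + expected
            + " but got " + list.get(i));
      }
    }
  }

  public static void main(String[] args) {
    assertIntegerList(Arrays.asList(10, 11, 12, 13), 10, 1); // in order: passes
    assertIntegerList(Arrays.asList(10, 12, 11, 13), 10, 1); // reordered: throws
  }
}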

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad5cd50d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
index e714bf8..d3b4e8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
@@ -125,7 +125,7 @@ public class TestGlobalThrottler {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     HTableDescriptor table = new HTableDescriptor(tableName);
     HColumnDescriptor fam = new HColumnDescriptor(famName);
-    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
+    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
     table.addFamily(fam);
     utility1.getAdmin().createTable(table);
     utility2.getAdmin().createTable(table);
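
The one-line change above moves the throttler test off the serial scope that
this commit removes. For reference, a minimal sketch of how a family's
replication scope is set (the table name "t" is illustrative; the scope
values 0/1/2 match the removed documentation below):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class ScopeSketch {
  public static HTableDescriptor globallyReplicatedTable() {
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t"));
    HColumnDescriptor fam = new HColumnDescriptor(Bytes.toBytes("f"));
    // REPLICATION_SCOPE_LOCAL  = 0: family is not replicated.
    // REPLICATION_SCOPE_GLOBAL = 1: replicated, ordering not guaranteed.
    // REPLICATION_SCOPE_SERIAL = 2: the serial scope this commit removes.
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    return table;
  }
}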

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad5cd50d/src/main/asciidoc/_chapters/ops_mgt.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 508815f..22047c6 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1367,11 +1367,9 @@ If a slave cluster does run out of room, or is inaccessible for other reasons, i
 .Consistency Across Replicated Clusters
 [WARNING]
 ====
-How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message. HBase provides two ways of replication, one is the original replication and the other is serial replication. In the previous way of replication, there is not a guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independent of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.
+How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message. Furthermore, there is not a guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independent of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.
 
 The combination of these two properties (at-least-once delivery and the lack of message ordering) means that some destination clusters may end up in a different state if your application makes use of operations that are not idempotent, e.g. Increments.
-
-To solve the problem, HBase now supports serial replication, which sends edits to destination cluster as the order of requests from client.
 ====
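
To make the idempotence caveat concrete: an Increment is a relative edit, so
an at-least-once redelivery can apply it twice on the peer while the source
applied it once. A minimal sketch (the table "t", family "f", and method name
are illustrative assumptions):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class NonIdempotentEditSketch {
  // A redelivered WAL entry for this edit bumps the peer's counter a second
  // time, leaving source and destination with different values. A Put of an
  // absolute value would be safe to redeliver.
  static void riskyUnderReplication(Connection conn) throws Exception {
    try (Table t = conn.getTable(TableName.valueOf("t"))) {
      Increment inc = new Increment(Bytes.toBytes("counter-row"));
      inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"), 1L);
      t.increment(inc);
    }
  }
}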
 
 .Terminology Changes
@@ -1412,9 +1410,6 @@ Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts
 LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
 ----
 
-.Serial Replication Configuration
-See <<Serial Replication,Serial Replication>>
-
 .Cluster Management Commands
 add_peer <ID> <CLUSTER_KEY>::
   Adds a replication relationship between two clusters. +
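
For reference, the programmatic equivalent of add_peer, mirroring the setup
in the deleted test above (a sketch; the peer id and cluster key are
placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
      ReplicationPeerConfig rpc = new ReplicationPeerConfig();
      rpc.setClusterKey("zk1:2181:/hbase"); // placeholder CLUSTER_KEY
      admin.addPeer("1", rpc, null);        // id "1", no table-CF filter
    }
  }
}
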
@@ -1436,40 +1431,6 @@ enable_table_replication <TABLE_NAME>::
 disable_table_replication <TABLE_NAME>::
   Disable the table replication switch for all its column families.
 
-=== Serial Replication
-
-Note: this feature is introduced in HBase 1.5
-
-.Function of serial replication
-
-Serial replication supports to push logs to the destination cluster in the same order as logs reach to the source cluster.
-
-.Why need serial replication?
-In replication of HBase, we push mutations to destination cluster by reading WAL in each region server. We have a queue for WAL files so we can read them in order of creation time. However, when region-move or RS failure occurs in source cluster, the hlog entries that are not pushed before region-move or RS-failure will be pushed by original RS(for region move) or another RS which takes over the remained hlog of dead RS(for RS failure), and the new entries for the same region(s) will be pushed by the RS which now serves the region(s), but they push the hlog entries of a same region concurrently without coordination.
-
-This treatment can possibly lead to data inconsistency between source and destination clusters:
-
-1. there are put and then delete written to source cluster.
-
-2. due to region-move / RS-failure, they are pushed by different replication-source threads to peer cluster.
-
-3. if delete is pushed to peer cluster before put, and flush and major-compact occurs in peer cluster before put is pushed to peer cluster, the delete is collected and the put remains in peer cluster, but in source cluster the put is masked by the delete, hence data inconsistency between source and destination clusters.
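
In client terms, the hazard described in the removed text is just a Put
followed by a Delete of the same cell; without ordered delivery the peer may
apply them in the opposite order. A sketch (row/family/qualifier names are
illustrative):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReorderingHazardSketch {
  // If the Delete replicates first and the peer flushes and major-compacts
  // before the Put arrives, the Put survives on the peer while the source
  // shows neither: divergent state.
  static void putThenDelete(Table t) throws Exception {
    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    t.put(put);
    Delete del = new Delete(Bytes.toBytes("r1"));
    del.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
    t.delete(del);
  }
}
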
-
-
-.Serial replication configuration
-
-. Set REPLICATION_SCOPE=>2 on the column family which is to be replicated serially when creating tables.
-
- REPLICATION_SCOPE is a column family level attribute. Its value can be 0, 1 or 2. Value 0 means replication is disabled, 1 means replication is enabled but which not guarantee log order, and 2 means serial replication is enabled.
-
-. This feature relies on zk-less assignment, and conflicts with distributed log replay, so users must set hbase.assignment.usezk=false and hbase.master.distributed.log.replay=false to support this feature.(Note that distributed log replay is deprecated and has already been purged from 2.0)
-
-.Limitations in serial replication
-
-Now we read and push logs in one RS to one peer in one thread, so if one log has not been pushed, all logs after it will be blocked. One wal file may contain wal edits from different tables, if one of the tables(or its CF) which REPLICATION_SCOPE is 2, and it is blocked, then all edits will be blocked, although other tables do not need serial replication. If you want to prevent this, then you need to split these tables/cfs into different peers.
-
-More details about serial replication can be found in link:https://issues.apache.org/jira/browse/HBASE-9465[HBASE-9465].
-
 === Verifying Replicated Data
 
 The `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following:
