HBASE-18699 Copy LoadIncrementalHFiles to another package and mark the old one as deprecated


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e53f292
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e53f292
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e53f292

Branch: refs/heads/master
Commit: 9e53f2927b3154eb703560933ddad489c2e232b5
Parents: 7c51d3f
Author: zhangduo <zhang...@apache.org>
Authored: Fri Sep 1 20:27:16 2017 +0800
Committer: zhangduo <zhang...@apache.org>
Committed: Sun Sep 3 19:49:42 2017 +0800

----------------------------------------------------------------------
 .../hbase/backup/impl/RestoreTablesClient.java  |    6 +-
 .../backup/mapreduce/MapReduceRestoreJob.java   |    2 +-
 .../hadoop/hbase/backup/util/BackupUtils.java   |    2 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |    2 +-
 .../hadoop/hbase/backup/TestBackupBase.java     |    2 +-
 .../TestIncrementalBackupWithBulkLoad.java      |    5 +-
 .../client/ColumnFamilyDescriptorBuilder.java   |   10 +-
 .../hbase/coprocessor/TestSecureExport.java     |    2 +-
 ...ReplicationSyncUpToolWithBulkLoadedData.java |    2 +-
 .../mapreduce/IntegrationTestBulkLoad.java      |   23 +-
 .../mapreduce/IntegrationTestImportTsv.java     |    5 +-
 .../hadoop/hbase/mapreduce/CopyTable.java       |    1 +
 .../apache/hadoop/hbase/mapreduce/Driver.java   |    1 +
 .../hbase/mapreduce/HRegionPartitioner.java     |    2 +-
 ...opSecurityEnabledUserProviderForTesting.java |   41 -
 .../hbase/mapreduce/TestHFileOutputFormat2.java |    6 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java |  669 ---------
 .../TestSecureLoadIncrementalHFiles.java        |   70 -
 ...ecureLoadIncrementalHFilesSplitRecovery.java |   69 -
 .../snapshot/TestMobSecureExportSnapshot.java   |    2 +-
 .../snapshot/TestSecureExportSnapshot.java      |    2 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java  | 1284 +-----------------
 .../compactions/PartitionedMobCompactor.java    |    2 +-
 .../regionserver/HFileReplicator.java           |    4 +-
 .../hbase/tool/LoadIncrementalHFiles.java       | 1251 +++++++++++++++++
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |    2 +-
 .../TestRegionObserverInterface.java            |    5 +-
 .../mapreduce/TestLoadIncrementalHFiles.java    |  763 -----------
 .../regionserver/TestScannerWithBulkload.java   |    2 +-
 .../replication/TestMasterReplication.java      |    2 +-
 ...opSecurityEnabledUserProviderForTesting.java |   41 +
 .../security/access/TestAccessController.java   |   19 +-
 .../hadoop/hbase/tool/MapreduceTestingShim.java |  171 +++
 .../hbase/tool/TestLoadIncrementalHFiles.java   |  723 ++++++++++
 .../TestLoadIncrementalHFilesSplitRecovery.java |  628 +++++++++
 .../tool/TestSecureLoadIncrementalHFiles.java   |   66 +
 ...ecureLoadIncrementalHFilesSplitRecovery.java |   66 +
 .../spark/IntegrationTestSparkBulkLoad.java     |    2 +-
 .../hbasecontext/JavaHBaseBulkLoadExample.java  |    2 +-
 .../hbase/spark/TestJavaHBaseContext.java       |    2 +-
 .../hadoop/hbase/spark/BulkLoadSuite.scala      |    2 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc        |    2 +-
 42 files changed, 3041 insertions(+), 2922 deletions(-)
----------------------------------------------------------------------
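
For downstream code, the practical effect of this change is an import move: the
bulk load tool formerly at org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
now lives in org.apache.hadoop.hbase.tool, with the old class kept only as a
deprecated shim. A minimal caller sketch follows; it is not part of the patch, the
class name is illustrative, and signatures should be verified against the release
you build on, but the doBulkLoad(dir, admin, table, locator) entry point is the
same one the relocated tests in this commit exercise.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
// New location introduced by this commit; the mapreduce-package class is now deprecated.
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

public class BulkLoadExample {  // illustrative class name, not part of the patch
  public static void main(String[] args) throws Exception {
    // args[0] = directory of pre-built HFiles, args[1] = target table name
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf(args[1]);
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator locator = conn.getRegionLocator(tableName);
        Admin admin = conn.getAdmin()) {
      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
      // Atomically load the HFiles under args[0] into the target table's regions.
      loader.doBulkLoad(new Path(args[0]), admin, table, locator);
    }
  }
}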


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index ea7a7b8..ff79533 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.backup.util.RestoreTool;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem;
 
 /**
  * Restore table implementation
@@ -231,7 +231,7 @@ public class RestoreTablesClient {
         LoadIncrementalHFiles loader = BackupUtils.createLoader(conf);
         for (int i = 0; i < sTableList.size(); i++) {
           if (mapForSrc[i] != null && !mapForSrc[i].isEmpty()) {
-            loaderResult = loader.run(null, mapForSrc[i], tTableArray[i]);
+            loaderResult = loader.run(mapForSrc[i], tTableArray[i]);
             LOG.debug("bulk loading " + sTableList.get(i) + " to " + 
tTableArray[i]);
             if (loaderResult.isEmpty()) {
               String msg = "Couldn't bulk load for " + sTableList.get(i) + " 
to " + tTableArray[i];

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index 1209e7c..93ea2e5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.RestoreJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.util.Tool;
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index 11a1a3d..74bfb6c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 2e311cf..ab56aaa 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 7fe9a61..8752ca2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
+import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 769785f..f63bf29 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -36,8 +36,9 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.tool.TestLoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Assert;
@@ -46,8 +47,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 /**
  * 1. Create table t1
  * 2. Load data to t1

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index d25f9d1..784a250 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -420,6 +420,10 @@ public class ColumnFamilyDescriptorBuilder {
     return this;
   }
 
+  public String getNameAsString() {
+    return desc.getNameAsString();
+  }
+
   public ColumnFamilyDescriptorBuilder setBlockCacheEnabled(boolean value) {
     desc.setBlockCacheEnabled(value);
     return this;
@@ -470,6 +474,10 @@ public class ColumnFamilyDescriptorBuilder {
     return this;
   }
 
+  public Compression.Algorithm getCompressionType() {
+    return desc.getCompressionType();
+  }
+
   public ColumnFamilyDescriptorBuilder setConfiguration(final String key, final String value) {
     desc.setConfiguration(key, value);
     return this;
@@ -610,7 +618,7 @@ public class ColumnFamilyDescriptorBuilder {
      */
     @InterfaceAudience.Private
     public ModifyableColumnFamilyDescriptor(final byte[] name) {
-      this(isLegalColumnFamilyName(name), getDefaultValuesBytes(), Collections.EMPTY_MAP);
+      this(isLegalColumnFamilyName(name), getDefaultValuesBytes(), Collections.emptyMap());
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
index 66d99dd..e4cd54d 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
@@ -48,10 +48,10 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.hbase.mapreduce.ExportUtils;
-import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.mapreduce.Import;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos;
 import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
+import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessControlConstants;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
index 75f8ee2..0b33d20 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 52f1223..cd84163 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -20,9 +20,16 @@ package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
@@ -53,6 +60,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
@@ -77,15 +85,8 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 
 /**
  * Test Bulk Load and MR on a distributed cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index fb7acf4..246cb5b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -32,21 +32,22 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.hadoop.util.Tool;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 9cccf8c..513beb4 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.Tool;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
index 1c69e77..dc5214e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
 import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.util.ProgramDriver;
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
index 3c3060b..5a8ead2 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
  *
  * <p>This class is not suitable as partitioner creating hfiles
 * for incremental bulk loads as region spread will likely change between time of
- * hfile creation and load time. See {@link LoadIncrementalHFiles}
+ * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles}
 * and <a href="http://hbase.apache.org/book.html#arch.bulk.load">Bulk Load</a>.</p>
  *
  * @param <KEY>  The type of the key.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
deleted file mode 100644
index b342f64..0000000
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import org.apache.hadoop.hbase.security.UserProvider;
-
-/**
- * A {@link UserProvider} that always says hadoop security is enabled, regardless of the underlying
- * configuration. HBase security is <i>not enabled</i> as this is used to determine if SASL is used
- * to do the authentication, which requires a Kerberos ticket (which we currently don't have in
- * tests).
- * <p>
- * This should only be used for <b>TESTING</b>.
- */
-public class HadoopSecurityEnabledUserProviderForTesting extends UserProvider {
-
-  @Override
-  public boolean isHBaseSecurityEnabled() {
-    return false;
-  }
-
-  @Override
-  public boolean isHadoopSecurityEnabled() {
-    return true;
-  }
-}
\ No newline at end of file
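
The testing UserProvider deleted above is not gone; per the diffstat it moves to
org.apache.hadoop.hbase.security (see the updated imports in TestBackupBase and
TestSecureExport earlier in this diff). Below is a small sketch of how secure
bulk load tests wire it in, assuming the same setup calls used by the secure test
classes later in this patch; the class name SecureTestSetupSketch is illustrative
only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;

public class SecureTestSetupSketch {  // illustrative helper, not part of the patch
  public static Configuration configure() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Force the "hadoop security on, HBase security off" provider from its new package.
    UserProvider.setUserProviderForTesting(conf, HadoopSecurityEnabledUserProviderForTesting.class);
    // Enable HBase security settings, as the secure bulk load tests in this patch do.
    SecureTestUtil.enableSecurity(conf);
    return conf;
  }
}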

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index c6a8761..cbff2de 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -36,6 +36,8 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -88,6 +90,7 @@ import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -109,9 +112,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 import org.mockito.Mockito;
 
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
 /**
  * Simple test for {@link HFileOutputFormat2}.
  * Sets up and runs a mapreduce job that writes hfile output.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
deleted file mode 100644
index 529a448..0000000
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ /dev/null
@@ -1,669 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
-
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-
-/**
- * Test cases for the atomic load error handling of the bulk load functionality.
- */
-@Category({MapReduceTests.class, LargeTests.class})
-public class TestLoadIncrementalHFilesSplitRecovery {
-  private static final Log LOG = LogFactory.getLog(TestHRegionServerBulkLoad.class);
-
-  static HBaseTestingUtility util;
-  //used by secure subclass
-  static boolean useSecure = false;
-
-  final static int NUM_CFS = 10;
-  final static byte[] QUAL = Bytes.toBytes("qual");
-  final static int ROWCOUNT = 100;
-
-  private final static byte[][] families = new byte[NUM_CFS][];
-
-  @Rule
-  public TestName name = new TestName();
-
-  static {
-    for (int i = 0; i < NUM_CFS; i++) {
-      families[i] = Bytes.toBytes(family(i));
-    }
-  }
-
-  static byte[] rowkey(int i) {
-    return Bytes.toBytes(String.format("row_%08d", i));
-  }
-
-  static String family(int i) {
-    return String.format("family_%04d", i);
-  }
-
-  static byte[] value(int i) {
-    return Bytes.toBytes(String.format("%010d", i));
-  }
-
-  public static void buildHFiles(FileSystem fs, Path dir, int value)
-      throws IOException {
-    byte[] val = value(value);
-    for (int i = 0; i < NUM_CFS; i++) {
-      Path testIn = new Path(dir, family(i));
-
-      TestHRegionServerBulkLoad.createHFile(fs, new Path(testIn, "hfile_" + i),
-          Bytes.toBytes(family(i)), QUAL, val, ROWCOUNT);
-    }
-  }
-
-  /**
-   * Creates a table with given table name and specified number of column
-   * families if the table does not already exist.
-   */
-  private void setupTable(final Connection connection, TableName table, int cfs)
-  throws IOException {
-    try {
-      LOG.info("Creating table " + table);
-      HTableDescriptor htd = new HTableDescriptor(table);
-      for (int i = 0; i < cfs; i++) {
-        htd.addFamily(new HColumnDescriptor(family(i)));
-      }
-      try (Admin admin = connection.getAdmin()) {
-        admin.createTable(htd);
-      }
-    } catch (TableExistsException tee) {
-      LOG.info("Table " + table + " already exists");
-    }
-  }
-
-  /**
-   * Creates a table with given table name,specified number of column families<br>
-   * and splitkeys if the table does not already exist.
-   * @param table
-   * @param cfs
-   * @param SPLIT_KEYS
-   */
-  private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] SPLIT_KEYS)
-      throws IOException {
-    try {
-      LOG.info("Creating table " + table);
-      HTableDescriptor htd = new HTableDescriptor(table);
-      for (int i = 0; i < cfs; i++) {
-        htd.addFamily(new HColumnDescriptor(family(i)));
-      }
-
-      util.createTable(htd, SPLIT_KEYS);
-    } catch (TableExistsException tee) {
-      LOG.info("Table " + table + " already exists");
-    }
-  }
-
-  private Path buildBulkFiles(TableName table, int value) throws Exception {
-    Path dir = util.getDataTestDirOnTestFS(table.getNameAsString());
-    Path bulk1 = new Path(dir, table.getNameAsString() + value);
-    FileSystem fs = util.getTestFileSystem();
-    buildHFiles(fs, bulk1, value);
-    return bulk1;
-  }
-
-  /**
-   * Populate table with known values.
-   */
-  private void populateTable(final Connection connection, TableName table, int value)
-  throws Exception {
-    // create HFiles for different column families
-    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
-    Path bulk1 = buildBulkFiles(table, value);
-    try (Table t = connection.getTable(table);
-        RegionLocator locator = connection.getRegionLocator(table);
-        Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(bulk1, admin, t, locator);
-    }
-  }
-
-  /**
-   * Split the known table in half.  (this is hard coded for this test suite)
-   */
-  private void forceSplit(TableName table) {
-    try {
-      // need to call regions server to by synchronous but isn't visible.
-      HRegionServer hrs = util.getRSForFirstRegionInTable(table);
-
-      for (HRegionInfo hri :
-          ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
-        if (hri.getTable().equals(table)) {
-          util.getAdmin().splitRegionAsync(hri.getRegionName(), rowkey(ROWCOUNT / 2));
-          //ProtobufUtil.split(null, hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2));
-        }
-      }
-
-      // verify that split completed.
-      int regions;
-      do {
-        regions = 0;
-        for (HRegionInfo hri :
-            ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
-          if (hri.getTable().equals(table)) {
-            regions++;
-          }
-        }
-        if (regions != 2) {
-          LOG.info("Taking some time to complete split...");
-          Thread.sleep(250);
-        }
-      } while (regions != 2);
-    } catch (IOException e) {
-      e.printStackTrace();
-    } catch (InterruptedException e) {
-      e.printStackTrace();
-    }
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    util = new HBaseTestingUtility();
-    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-    util.startMiniCluster(1);
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    util.shutdownMiniCluster();
-  }
-
-  /**
-   * Checks that all columns have the expected value and that there is the
-   * expected number of rows.
-   * @throws IOException
-   */
-  void assertExpectedTable(TableName table, int count, int value) throws IOException {
-    HTableDescriptor [] htds = util.getAdmin().listTables(table.getNameAsString());
-    assertEquals(htds.length, 1);
-    Table t = null;
-    try {
-      t = util.getConnection().getTable(table);
-      Scan s = new Scan();
-      ResultScanner sr = t.getScanner(s);
-      int i = 0;
-      for (Result r : sr) {
-        i++;
-        for (NavigableMap<byte[], byte[]> nm : r.getNoVersionMap().values()) {
-          for (byte[] val : nm.values()) {
-            assertTrue(Bytes.equals(val, value(value)));
-          }
-        }
-      }
-      assertEquals(count, i);
-    } catch (IOException e) {
-      fail("Failed due to exception");
-    } finally {
-      if (t != null) t.close();
-    }
-  }
-
-  /**
-   * Test that shows that exception thrown from the RS side will result in an
-   * exception on the LIHFile client.
-   */
-  @Test(expected=IOException.class, timeout=120000)
-  public void testBulkLoadPhaseFailure() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    final AtomicInteger attmptedCalls = new AtomicInteger();
-    final AtomicInteger failedCalls = new AtomicInteger();
-    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
-    try (Connection connection = ConnectionFactory.createConnection(util
-        .getConfiguration())) {
-      setupTable(connection, table, 10);
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-          util.getConfiguration()) {
-        @Override
-        protected List<LoadQueueItem> tryAtomicRegionLoad(
-            ClientServiceCallable<byte[]> serviceCallable, TableName tableName, final byte[] first,
-            Collection<LoadQueueItem> lqis) throws IOException {
-          int i = attmptedCalls.incrementAndGet();
-          if (i == 1) {
-            Connection errConn;
-            try {
-              errConn = getMockedConnection(util.getConfiguration());
-              serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
-            } catch (Exception e) {
-              LOG.fatal("mocking cruft, should never happen", e);
-              throw new RuntimeException("mocking cruft, should never happen");
-            }
-            failedCalls.incrementAndGet();
-            return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
-          }
-
-          return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
-        }
-      };
-      try {
-        // create HFiles for different column families
-        Path dir = buildBulkFiles(table, 1);
-        try (Table t = connection.getTable(table);
-            RegionLocator locator = connection.getRegionLocator(table);
-            Admin admin = connection.getAdmin()) {
-          lih.doBulkLoad(dir, admin, t, locator);
-        }
-      } finally {
-        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-      }
-      fail("doBulkLoad should have thrown an exception");
-    }
-  }
-
-  /**
-   * Test that shows that exception thrown from the RS side will result in the
-   * expected number of retries set by ${@link HConstants#HBASE_CLIENT_RETRIES_NUMBER}
-   * when ${@link LoadIncrementalHFiles#RETRY_ON_IO_EXCEPTION} is set
-   */
-  @Test
-  public void testRetryOnIOException() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    final AtomicInteger calls = new AtomicInteger(1);
-    final Connection conn = ConnectionFactory.createConnection(util
-        .getConfiguration());
-    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
-    util.getConfiguration().setBoolean(
-        LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
-    final LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-        util.getConfiguration()) {
-      @Override
-      protected List<LoadQueueItem> tryAtomicRegionLoad(
-          ClientServiceCallable<byte[]> serverCallable, TableName tableName,
-          final byte[] first, Collection<LoadQueueItem> lqis)
-          throws IOException {
-        if (calls.getAndIncrement() < util.getConfiguration().getInt(
-            HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) - 1) {
-          ClientServiceCallable<byte[]> newServerCallable = new ClientServiceCallable<byte[]>(
-              conn, tableName, first, new RpcControllerFactory(
-                  util.getConfiguration()).newController(), HConstants.PRIORITY_UNSET) {
-            @Override
-            public byte[] rpcCall() throws Exception {
-              throw new IOException("Error calling something on RegionServer");
-            }
-          };
-          return super.tryAtomicRegionLoad(newServerCallable, tableName, first, lqis);
-        } else {
-          return super.tryAtomicRegionLoad(serverCallable, tableName, first, lqis);
-        }
-      }
-    };
-    setupTable(conn, table, 10);
-    Path dir = buildBulkFiles(table, 1);
-    lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table),
-        conn.getRegionLocator(table));
-    util.getConfiguration().setBoolean(
-        LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);
-
-  }
-
-  @SuppressWarnings("deprecation")
-  private ClusterConnection getMockedConnection(final Configuration conf)
-  throws IOException, org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
-    ClusterConnection c = Mockito.mock(ClusterConnection.class);
-    Mockito.when(c.getConfiguration()).thenReturn(conf);
-    Mockito.doNothing().when(c).close();
-    // Make it so we return a particular location when asked.
-    final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
-        ServerName.valueOf("example.org", 1234, 0));
-    Mockito.when(c.getRegionLocation((TableName) Mockito.any(),
-        (byte[]) Mockito.any(), Mockito.anyBoolean())).
-      thenReturn(loc);
-    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).
-      thenReturn(loc);
-    ClientProtos.ClientService.BlockingInterface hri =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-    Mockito.when(hri.bulkLoadHFile((RpcController)Mockito.any(), (BulkLoadHFileRequest)Mockito.any())).
-      thenThrow(new ServiceException(new IOException("injecting bulk load error")));
-    Mockito.when(c.getClient(Mockito.any(ServerName.class))).
-      thenReturn(hri);
-    return c;
-  }
-
-  /**
-   * This test exercises the path where there is a split after initial
-   * validation but before the atomic bulk load call. We cannot use presplitting
-   * to test this path, so we actually inject a split just before the atomic
-   * region load.
-   */
-  @Test (timeout=120000)
-  public void testSplitWhileBulkLoadPhase() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, table, 10);
-      populateTable(connection, table,1);
-      assertExpectedTable(table, ROWCOUNT, 1);
-
-      // Now let's cause trouble.  This will occur after checks and cause bulk
-      // files to fail when attempt to atomically import.  This is recoverable.
-      final AtomicInteger attemptedCalls = new AtomicInteger();
-      LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
-        @Override
-        protected void bulkLoadPhase(final Table htable, final Connection conn,
-            ExecutorService pool, Deque<LoadQueueItem> queue,
-            final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile,
-            Map<LoadQueueItem, ByteBuffer> item2RegionMap)
-                throws IOException {
-          int i = attemptedCalls.incrementAndGet();
-          if (i == 1) {
-            // On first attempt force a split.
-            forceSplit(table);
-          }
-          super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
-        }
-      };
-
-      // create HFiles for different column families
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        Path bulk = buildBulkFiles(table, 2);
-        lih2.doBulkLoad(bulk, admin, t, locator);
-      }
-
-      // check that data was loaded
-      // The three expected attempts are 1) failure because need to split, 2)
-      // load of split top 3) load of split bottom
-      assertEquals(attemptedCalls.get(), 3);
-      assertExpectedTable(table, ROWCOUNT, 2);
-    }
-  }
-
-  /**
-   * This test splits a table and attempts to bulk load.  The bulk import files
-   * should be split before atomically importing.
-   */
-  @Test (timeout=120000)
-  public void testGroupOrSplitPresplit() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, table, 10);
-      populateTable(connection, table, 1);
-      assertExpectedTable(connection, table, ROWCOUNT, 1);
-      forceSplit(table);
-
-      final AtomicInteger countedLqis= new AtomicInteger();
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-          util.getConfiguration()) {
-        @Override
-        protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-            Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-            final LoadQueueItem item, final Table htable,
-            final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-          Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable,
-              startEndKeys);
-          if (lqis != null && lqis.getFirst() != null) {
-            countedLqis.addAndGet(lqis.getFirst().size());
-          }
-          return lqis;
-        }
-      };
-
-      // create HFiles for different column families
-      Path bulk = buildBulkFiles(table, 2);
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(bulk, admin, t, locator);
-      }
-      assertExpectedTable(connection, table, ROWCOUNT, 2);
-      assertEquals(20, countedLqis.get());
-    }
-  }
-
-  /**
-   * This test creates a table with many small regions.  The bulk load files
-   * would be splitted multiple times before all of them can be loaded successfully.
-   */
-  @Test (timeout=120000)
-  public void testSplitTmpFileCleanUp() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
-        Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"),
-        Bytes.toBytes("row_00000040"), Bytes.toBytes("row_00000050")};
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTableWithSplitkeys(table, 10, SPLIT_KEYS);
-
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
-
-      // create HFiles
-      Path bulk = buildBulkFiles(table, 2);
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(bulk, admin, t, locator);
-      }
-      // family path
-      Path tmpPath = new Path(bulk, family(0));
-      // TMP_DIR under family path
-      tmpPath = new Path(tmpPath, LoadIncrementalHFiles.TMP_DIR);
-      FileSystem fs = bulk.getFileSystem(util.getConfiguration());
-      // HFiles have been splitted, there is TMP_DIR
-      assertTrue(fs.exists(tmpPath));
-      // TMP_DIR should have been cleaned-up
-      assertNull(LoadIncrementalHFiles.TMP_DIR + " should be empty.",
-        FSUtils.listStatus(fs, tmpPath));
-      assertExpectedTable(connection, table, ROWCOUNT, 2);
-    }
-  }
-
-  /**
-   * This simulates an remote exception which should cause LIHF to exit with an
-   * exception.
-   */
-  @Test(expected = IOException.class, timeout=120000)
-  public void testGroupOrSplitFailure() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, tableName, 10);
-
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-          util.getConfiguration()) {
-        int i = 0;
-
-        @Override
-        protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-            Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-            final LoadQueueItem item, final Table table,
-            final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-          i++;
-
-          if (i == 5) {
-            throw new IOException("failure");
-          }
-          return super.groupOrSplit(regionGroups, item, table, startEndKeys);
-        }
-      };
-
-      // create HFiles for different column families
-      Path dir = buildBulkFiles(tableName,1);
-      try (Table t = connection.getTable(tableName);
-          RegionLocator locator = connection.getRegionLocator(tableName);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(dir, admin, t, locator);
-      }
-    }
-
-    fail("doBulkLoad should have thrown an exception");
-  }
-
-  @Test (timeout=120000)
-  public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
-    // Share connection. We were failing to find the table with our new reverse scan because it
-    // looks for first region, not any region -- that is how it works now.  The below removes first
-    // region in test.  Was reliant on the Connection caching having first region.
-    Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
-    Table table = connection.getTable(tableName);
-
-    setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
-    Path dir = buildBulkFiles(tableName, 2);
-
-    final AtomicInteger countedLqis = new AtomicInteger();
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {
-
-      @Override
-      protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-          final LoadQueueItem item, final Table htable,
-          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-        Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable,
-            startEndKeys);
-        if (lqis != null && lqis.getFirst() != null) {
-          countedLqis.addAndGet(lqis.getFirst().size());
-        }
-        return lqis;
-      }
-    };
-
-    // do bulkload when there is no region hole in hbase:meta.
-    try (Table t = connection.getTable(tableName);
-        RegionLocator locator = connection.getRegionLocator(tableName);
-        Admin admin = connection.getAdmin()) {
-      loader.doBulkLoad(dir, admin, t, locator);
-    } catch (Exception e) {
-      LOG.error("exeception=", e);
-    }
-    // check if all the data are loaded into the table.
-    this.assertExpectedTable(tableName, ROWCOUNT, 2);
-
-    dir = buildBulkFiles(tableName, 3);
-
-    // Mess it up by leaving a hole in the hbase:meta
-    List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
-    for (HRegionInfo regionInfo : regionInfos) {
-      if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-        MetaTableAccessor.deleteRegion(connection, regionInfo);
-        break;
-      }
-    }
-
-    try (Table t = connection.getTable(tableName);
-        RegionLocator locator = connection.getRegionLocator(tableName);
-        Admin admin = connection.getAdmin()) {
-      loader.doBulkLoad(dir, admin, t, locator);
-    } catch (Exception e) {
-      LOG.error("exception=", e);
-      assertTrue("IOException expected", e instanceof IOException);
-    }
-
-    table.close();
-
-    // Make sure at least the one region that still exists can be found.
-    regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
-    assertTrue(regionInfos.size() >= 1);
-
-    this.assertExpectedTable(connection, tableName, ROWCOUNT, 2);
-    connection.close();
-  }
-
-  /**
-   * Checks that all columns have the expected value and that there is the
-   * expected number of rows.
-   * @throws IOException
-   */
-  void assertExpectedTable(final Connection connection, TableName table, int count, int value)
-  throws IOException {
-    HTableDescriptor [] htds = util.getAdmin().listTables(table.getNameAsString());
-    assertEquals(htds.length, 1);
-    Table t = null;
-    try {
-      t = connection.getTable(table);
-      Scan s = new Scan();
-      ResultScanner sr = t.getScanner(s);
-      int i = 0;
-      for (Result r : sr) {
-        i++;
-        for (NavigableMap<byte[], byte[]> nm : r.getNoVersionMap().values()) {
-          for (byte[] val : nm.values()) {
-            assertTrue(Bytes.equals(val, value(value)));
-          }
-        }
-      }
-      assertEquals(count, i);
-    } catch (IOException e) {
-      fail("Failed due to exception");
-    } finally {
-      if (t != null) t.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java
deleted file mode 100644
index 78fddbc..0000000
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
-
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-/**
- * Reruns TestLoadIncrementalHFiles using LoadIncrementalHFiles in secure mode.
- * This suite is unable to verify the security handoff/turnover
- * as miniCluster is running as system user thus has root privileges
- * and delegation tokens don't seem to work on miniDFS.
- *
- * Thus SecureBulkload can only be completely verified by running
- * integration tests against a secure cluster. This suite is still
- * invaluable as it verifies the other mechanisms that need to be
- * supported as part of a LoadIncrementalFiles call.
- */
-@Category({MapReduceTests.class, LargeTests.class})
-public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    // set the always on security provider
-    UserProvider.setUserProviderForTesting(util.getConfiguration(),
-      HadoopSecurityEnabledUserProviderForTesting.class);
-    // setup configuration
-    SecureTestUtil.enableSecurity(util.getConfiguration());
-    util.getConfiguration().setInt(
-        LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
-        MAX_FILES_PER_REGION_PER_FAMILY);
-    // change default behavior so that tag values are returned with normal rpcs
-    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
-        KeyValueCodecWithTags.class.getCanonicalName());
-
-    util.startMiniCluster();
-
-    // Wait for the ACL table to become available
-    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
-
-    setupNamespace();
-  }
-
-}
-
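
For context: this suite is expected to resurface under the new bulk load tool package. A minimal sketch, assuming the relocated class lives in org.apache.hadoop.hbase.tool, extends the relocated TestLoadIncrementalHFiles there, and reuses the deleted @BeforeClass body with the test-only user provider now imported from the security package; util, MAX_FILES_PER_REGION_PER_FAMILY and setupNamespace() are assumed to be inherited from the parent test, and the exact contents of the new file are not shown in this hunk:

package org.apache.hadoop.hbase.tool;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.junit.BeforeClass;

public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Force the "always secure" user provider so RPC behaves as on a secured
    // cluster, even though the mini cluster runs as the system user.
    UserProvider.setUserProviderForTesting(util.getConfiguration(),
      HadoopSecurityEnabledUserProviderForTesting.class);
    SecureTestUtil.enableSecurity(util.getConfiguration());
    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
      MAX_FILES_PER_REGION_PER_FAMILY);
    // Return tag values over normal RPCs so bulk-loaded cell tags can be verified.
    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());
    util.startMiniCluster();
    // The ACL table must be online before secure bulk loads are attempted.
    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
    setupNamespace();
  }
}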

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
deleted file mode 100644
index 0e877ad..0000000
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-
-/**
- * Reruns TestSecureLoadIncrementalHFilesSplitRecovery
- * using LoadIncrementalHFiles in secure mode.
- * This suite is unable to verify the security handoff/turnove
- * as miniCluster is running as system user thus has root privileges
- * and delegation tokens don't seem to work on miniDFS.
- *
- * Thus SecureBulkload can only be completely verified by running
- * integration tests against a secure cluster. This suite is still
- * invaluable as it verifies the other mechanisms that need to be
- * supported as part of a LoadIncrementalFiles call.
- */
-@Category({MapReduceTests.class, LargeTests.class})
-public class TestSecureLoadIncrementalHFilesSplitRecovery extends TestLoadIncrementalHFilesSplitRecovery {
-
-  //This "overrides" the parent static method
-  //make sure they are in sync
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    util = new HBaseTestingUtility();
-    // set the always on security provider
-    UserProvider.setUserProviderForTesting(util.getConfiguration(),
-      HadoopSecurityEnabledUserProviderForTesting.class);
-    // setup configuration
-    SecureTestUtil.enableSecurity(util.getConfiguration());
-
-    util.startMiniCluster();
-
-    // Wait for the ACL table to become available
-    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
-  }
-
-  //Disabling this test as it does not work in secure mode
-  @Test (timeout=180000)
-  @Override
-  public void testBulkLoadPhaseFailure() {
-  }
-}
\ No newline at end of file
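
A note on the "overrides"/keep-in-sync comment in the deleted class: static methods are never truly overridden in Java, but under stock JUnit 4 semantics a @BeforeClass method that redeclares the parent's name shadows it, so only the subclass body runs. A tiny self-contained illustration (the classes below are hypothetical, not part of HBase):

import org.junit.BeforeClass;
import org.junit.Test;

public class ShadowingSketch {
  public static class BaseSuite {
    @BeforeClass
    public static void setupCluster() throws Exception {
      System.out.println("plain (insecure) cluster setup");
    }

    @Test
    public void smoke() {
    }
  }

  public static class SecureSuite extends BaseSuite {
    // Same name as the parent's @BeforeClass: JUnit runs only this one,
    // which is why the two bodies have to be kept in sync by hand.
    @BeforeClass
    public static void setupCluster() throws Exception {
      System.out.println("secure cluster setup (replaces the parent's, does not follow it)");
    }
  }
}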

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java
index 98d03c0..df9f4ff 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.snapshot;
 
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
-import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
+import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e53f292/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java
index 7d4832c..def0838 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java
@@ -20,7 +20,7 @@
 package org.apache.hadoop.hbase.snapshot;
 
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
+import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
