This is an automated email from the ASF dual-hosted git repository.
apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-1.4 by this push:
new ac2dfbb HBASE-20586: add support for clusters on different realms (with cross-realm authentication)
ac2dfbb is described below
commit ac2dfbbc8caed058ba432441a6feb797c4f91654
Author: wellington <[email protected]>
AuthorDate: Tue May 15 15:32:54 2018 +0100
HBASE-20586: add support for clusters on different realms (with cross-realm authentication)
Signed-off-by: Andrew Purtell <[email protected]>
---
.../apache/hadoop/hbase/mapreduce/SyncTable.java | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index 32e3b00..ddb169e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -78,12 +79,28 @@ public class SyncTable extends Configured implements Tool {
super(conf);
}
+  private void initCredentialsForHBase(String zookeeper, Job job) throws IOException {
+ Configuration peerConf = HBaseConfiguration.createClusterConf(job
+ .getConfiguration(), zookeeper);
+ if(peerConf.get("hbase.security.authentication").equals("kerberos")){
+ TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
+ }
+ }
+
public Job createSubmittableJob(String[] args) throws IOException {
FileSystem fs = sourceHashDir.getFileSystem(getConf());
if (!fs.exists(sourceHashDir)) {
throw new IOException("Source hash dir not found: " + sourceHashDir);
}
+ Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name",
+ "syncTable_" + sourceTableName + "-" + targetTableName));
+ Configuration jobConf = job.getConfiguration();
+ if (jobConf.get("hadoop.security.authentication").equals("kerberos")) {
+      TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf());
+ }
+
     HashTable.TableHash tableHash = HashTable.TableHash.read(getConf(), sourceHashDir);
LOG.info("Read source hash manifest: " + tableHash);
LOG.info("Read " + tableHash.partitions.size() + " partition keys");
@@ -113,18 +130,17 @@ public class SyncTable extends Configured implements Tool {
+ " found in the partitions file is " + tableHash.partitions.size());
}
- Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name",
- "syncTable_" + sourceTableName + "-" + targetTableName));
- Configuration jobConf = job.getConfiguration();
job.setJarByClass(HashTable.class);
jobConf.set(SOURCE_HASH_DIR_CONF_KEY, sourceHashDir.toString());
jobConf.set(SOURCE_TABLE_CONF_KEY, sourceTableName);
jobConf.set(TARGET_TABLE_CONF_KEY, targetTableName);
if (sourceZkCluster != null) {
jobConf.set(SOURCE_ZK_CLUSTER_CONF_KEY, sourceZkCluster);
+ initCredentialsForHBase(sourceZkCluster, job);
}
if (targetZkCluster != null) {
jobConf.set(TARGET_ZK_CLUSTER_CONF_KEY, targetZkCluster);
+ initCredentialsForHBase(targetZkCluster, job);
}
jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);