Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c24505053 -> 0882725c8


HDFS-13040. Kerberized inotify client fails despite kinit properly. Contributed by Istvan Fajth, Wei-Chiu Chuang, Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0882725c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0882725c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0882725c

Branch: refs/heads/branch-2
Commit: 0882725c889f6d77dd7feae986a84bf63cc5f053
Parents: c245050
Author: Xiao Chen <x...@apache.org>
Authored: Mon Mar 5 09:38:04 2018 -0800
Committer: Xiao Chen <x...@apache.org>
Committed: Mon Mar 5 09:44:42 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/NameNodeRpcServer.java | 32 ++++++++++++++++----
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  | 19 +++++++++---
 2 files changed, 41 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
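
As background for the change below: a kerberized superuser client reads the NameNode edit stream through the HDFS inotify API, and the failure fixed here happened on the NameNode side while serving such a client even though the client's kinit ticket was valid. A minimal, hypothetical client sketch (not part of the patch; the nameservice URI is a placeholder):

    import java.net.URI;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.inotify.EventBatch;

    public class InotifyTail {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Requires HDFS superuser privileges; with Kerberos, run after kinit.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nameservice1"), conf);
        DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
        while (true) {
          // poll() returns null if no batch arrives within the timeout.
          EventBatch batch = stream.poll(1, TimeUnit.SECONDS);
          if (batch != null) {
            System.out.println("txid=" + batch.getTxid()
                + " events=" + batch.getEvents().length);
          }
        }
      }
    }

The server-side change below makes the NameNode read the requested edits as its own login user instead of inside the caller's RPC context.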


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0882725c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 89571f4..e6d03bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -33,6 +33,7 @@ import static org.apache.hadoop.util.Time.now;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -175,6 +176,7 @@ import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -2113,15 +2115,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
-  public EventBatchList getEditsFromTxid(long txid) throws IOException {
+  public EventBatchList getEditsFromTxid(final long txid) throws IOException {
     checkNNStartup();
     namesystem.checkOperation(OperationCategory.READ); // only active
     namesystem.checkSuperuserPrivilege();
-    int maxEventsPerRPC = nn.getConf().getInt(
+    final int maxEventsPerRPC = nn.getConf().getInt(
         DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY,
         DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT);
-    FSEditLog log = namesystem.getFSImage().getEditLog();
-    long syncTxid = log.getSyncTxId();
+    final FSEditLog log = namesystem.getFSImage().getEditLog();
+    final long syncTxid = log.getSyncTxId();
     // If we haven't synced anything yet, we can only read finalized
     // segments since we can't reliably determine which txns in in-progress
     // segments have actually been committed (e.g. written to a quorum of JNs).
@@ -2130,8 +2132,26 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     // journals. (In-progress segments written by old writers are already
     // discarded for us, so if we read any in-progress segments they are
     // guaranteed to have been written by this NameNode.)
-    boolean readInProgress = syncTxid > 0;
+    final boolean readInProgress = syncTxid > 0;
+
+    // doas the NN login user for the actual operations to get edits.
+    // Notably this is necessary when polling from the remote edits via https.
+    // We have validated the client is a superuser from the NN RPC, so this
+    // running as the login user here is safe.
+    EventBatchList ret = SecurityUtil.doAsLoginUser(
+        new PrivilegedExceptionAction<EventBatchList>() {
+          @Override
+          public EventBatchList run() throws IOException {
+            return getEventBatchList(syncTxid, txid, log, readInProgress,
+                maxEventsPerRPC);
+          }
+        });
+    return ret;
+  }
 
+  private EventBatchList getEventBatchList(long syncTxid, long txid,
+      FSEditLog log, boolean readInProgress, int maxEventsPerRPC)
+      throws IOException {
     List<EventBatch> batches = Lists.newArrayList();
     int totalEvents = 0;
     long maxSeenTxid = -1;
@@ -2150,7 +2170,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
       // and are using QJM -- the edit log will be closed and this exception
       // will result
       LOG.info("NN is transitioning from active to standby and FSEditLog " +
-      "is closed -- could not read edits");
+          "is closed -- could not read edits");
       return new EventBatchList(batches, firstSeenTxid, maxSeenTxid, syncTxid);
     }
 

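The SecurityUtil.doAsLoginUser call introduced above runs the edit-log read in the security context of the NameNode's own login (keytab) principal rather than the RPC caller's, which matters when the edits have to be fetched from the JournalNodes over HTTPS. A rough, simplified sketch of that delegation pattern (illustrative helper, not the actual SecurityUtil implementation):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    final class LoginUserRunner {
      // Run the given action as the process's login (keytab) identity.
      static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action)
          throws IOException {
        try {
          return UserGroupInformation.getLoginUser().doAs(action);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException(e);
        }
      }
    }

The RPC handler has already enforced checkSuperuserPrivilege(), so running this read as the login user does not widen what the caller can see.
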
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0882725c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 5caccf4..0cb1838 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -52,7 +52,8 @@ public class MiniQJMHACluster {
     private final Configuration conf;
     private StartupOption startOpt = null;
     private final MiniDFSCluster.Builder dfsBuilder;
-    
+    private boolean forceRemoteEditsOnly = false;
+
     public Builder(Configuration conf) {
       this.conf = conf;
       // most QJMHACluster tests don't need DataNodes, so we'll make
@@ -71,6 +72,11 @@ public class MiniQJMHACluster {
     public void startupOption(StartupOption startOpt) {
       this.startOpt = startOpt;
     }
+
+    public Builder setForceRemoteEditsOnly(boolean val) {
+      this.forceRemoteEditsOnly = val;
+      return this;
+    }
   }
   
   public static MiniDFSNNTopology createDefaultTopology(int basePort) {
@@ -100,7 +106,7 @@ public class MiniQJMHACluster {
         // start cluster with 2 NameNodes
         MiniDFSNNTopology topology = createDefaultTopology(basePort);
 
-        initHAConf(journalURI, builder.conf, basePort);
+        initHAConf(journalURI, builder, basePort);
 
         // First start up the NNs just to format the namespace. The MiniDFSCluster
         // has no way to just format the NameNodes without also starting them.
@@ -131,11 +137,16 @@ public class MiniQJMHACluster {
     }
   }
 
-  private Configuration initHAConf(URI journalURI, Configuration conf,
+  private Configuration initHAConf(URI journalURI, Builder builder,
       int basePort) {
     conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
         journalURI.toString());
-    
+    if (builder.forceRemoteEditsOnly) {
+      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, journalURI.toString());
+      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          journalURI.toString());
+    }
+
     String address1 = "127.0.0.1:" + basePort;
     String address2 = "127.0.0.1:" + (basePort + 2);
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
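
A hypothetical sketch of how a test might use the new builder option; only setForceRemoteEditsOnly comes from this patch, and the surrounding class and cluster handling are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;

    public class RemoteEditsOnlySetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep NameNode edits only on the JournalNodes so that serving
        // inotify requests must read edits over the remote (QJM) path.
        MiniQJMHACluster qjmCluster = new MiniQJMHACluster.Builder(conf)
            .setForceRemoteEditsOnly(true)
            .build();
        try {
          MiniDFSCluster dfsCluster = qjmCluster.getDfsCluster();
          dfsCluster.transitionToActive(0);
          dfsCluster.waitActive();
        } finally {
          qjmCluster.shutdown();
        }
      }
    }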


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
