[ 
https://issues.apache.org/jira/browse/HDFS-16686?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17582057#comment-17582057
 ] 

ASF GitHub Bot commented on HDFS-16686:
---------------------------------------

sunchao commented on code in PR #4724:
URL: https://github.com/apache/hadoop/pull/4724#discussion_r950588311


##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java:
##########
@@ -35,7 +35,7 @@
 import java.util.List;
 import java.util.Random;
 
-public class MiniQJMHACluster {
+public class MiniQJMHACluster implements AutoCloseable {

Review Comment:
   Is this change related? I'm not sure whether it should be included in this 
PR, since it changes the `shutdown` method signature.



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java:
##########
@@ -80,16 +82,38 @@ public static void runCmd(DFSAdmin dfsadmin, boolean 
success,
     }
   }
 
+  @Rule
+  public TemporaryFolder folder= new TemporaryFolder();

Review Comment:
   nit: space before `=`



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java:
##########
@@ -80,16 +82,38 @@ public static void runCmd(DFSAdmin dfsadmin, boolean 
success,
     }
   }
 
+  @Rule
+  public TemporaryFolder folder= new TemporaryFolder();
+
+  /**
+   * Create a default HDFS configuration which has test-specific data 
directories.  This is
+   * intended to protect against interactions between test runs that might 
corrupt results.  Each
+   * test run's data is automatically cleaned-up by JUnit.
+   *
+   * @return a default configuration with test-specific data directories
+   */
+  public Configuration getHdfsConfiguration() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+
+    // Override the file system locations with test-specific temporary folders
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        folder.newFolder("dfs/name").toString());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        folder.newFolder("dfs/namesecondary").toString());
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        folder.newFolder("dfs/data").toString());
+
+    return conf;
+  }
+
   /**
    * Test DFSAdmin Upgrade Command.
    */
   @Test
   public void testDFSAdminRollingUpgradeCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    final Configuration conf = getHdfsConfiguration();

Review Comment:
   could you highlight the major change in this test?



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java:
##########
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestGetJournalEditServlet {
+
+  private final static Configuration CONF = new HdfsConfiguration();
+
+  private final static GetJournalEditServlet SERVLET = new 
GetJournalEditServlet();
+
+  @BeforeClass
+  public static void setUp() throws ServletException {
+    LogManager.getLogger(GetJournalEditServlet.class).setLevel(Level.DEBUG);

Review Comment:
   why do we want to set the logger level to DEBUG here?



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java:
##########
@@ -407,10 +426,8 @@ private static void rollbackRollingUpgrade(Path foo, Path 
bar,
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(1).build()){

Review Comment:
   nit: space before `{`





> GetJournalEditServlet fails to authorize valid Kerberos request
> ---------------------------------------------------------------
>
>                 Key: HDFS-16686
>                 URL: https://issues.apache.org/jira/browse/HDFS-16686
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>          Components: journal-node
>         Environment: Running in Kubernetes using Java 11 in an HA 
> configuration.  JournalNodes run on separate pods and have their own Kerberos 
> principal "jn/<hostname>@<realm>".
>            Reporter: Steve Vaughan
>            Assignee: Steve Vaughan
>            Priority: Major
>              Labels: pull-request-available
>
> GetJournalEditServlet uses request.getRemoteUser() to determine the 
> remoteShortName for Kerberos authorization, which fails to match when the 
> JournalNode uses its own Kerberos principal (e.g. jn/<hostname>@<realm>).
> This can be fixed by using the UserGroupInformation provided by the base 
> DfsServlet class using the getUGI(request, conf) call.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to