This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 24c1000dd6 HDDS-11041. Add admin request filter for S3 requests and
UGI support for GrpcOmTransport (#7268)
24c1000dd6 is described below
commit 24c1000dd6e0d0c2cbe0bad88d209f9ba2029237
Author: Abhishek Pal <[email protected]>
AuthorDate: Sat Oct 26 22:44:14 2024 +0530
HDDS-11041. Add admin request filter for S3 requests and UGI support for
GrpcOmTransport (#7268)
---
.../org/apache/hadoop/hdds/server/OzoneAdmins.java | 89 +++++++++++++++
.../apache/hadoop/hdds/server/TestOzoneAdmins.java | 125 +++++++++++++++++++++
.../ozone/om/ha/GrpcOMFailoverProxyProvider.java | 11 +-
.../om/ha/HadoopRpcOMFailoverProxyProvider.java | 30 +----
.../ozone/om/ha/OMFailoverProxyProviderBase.java | 41 +++++++
.../ozone/om/protocolPB/GrpcOmTransport.java | 1 +
.../main/compose/ozonesecure/docker-compose.yaml | 2 +-
.../dist/src/main/compose/ozonesecure/test-fcq.sh | 1 +
.../src/main/smoketest/s3/secretgenerate.robot | 8 +-
.../dist/src/main/smoketest/s3/secretrevoke.robot | 8 +-
.../apache/hadoop/ozone/om/OzoneConfigUtil.java | 42 -------
.../org/apache/hadoop/ozone/om/OzoneManager.java | 8 +-
.../hadoop/ozone/om/TestOzoneConfigUtil.java | 44 --------
.../hadoop/ozone/s3secret/S3AdminEndpoint.java | 34 ++++++
.../hadoop/ozone/s3secret/S3SecretAdminFilter.java | 60 ++++++++++
.../ozone/s3secret/S3SecretManagementEndpoint.java | 8 +-
.../hadoop/ozone/s3secret/TestSecretGenerate.java | 6 +-
.../hadoop/ozone/s3secret/TestSecretRevoke.java | 2 -
18 files changed, 376 insertions(+), 144 deletions(-)
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java
index 12b6b64f49..1f8568866d 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java
@@ -17,13 +17,16 @@
*/
package org.apache.hadoop.hdds.server;
+import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import com.google.common.collect.Sets;
+import jakarta.annotation.Nullable;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -33,6 +36,8 @@ import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUP
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS_GROUPS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS;
/**
* This class contains ozone admin user information, username and group,
@@ -186,4 +191,88 @@ public class OzoneAdmins {
return conf.getTrimmedStringCollection(
OZONE_READONLY_ADMINISTRATORS_GROUPS);
}
+
+ /**
+ * Get the list of S3 administrators from Ozone config.
+ * <p/>
+ * <strong>Notes</strong>:
+ * <ul>
+ * <li>If <code>ozone.s3.administrators</code> value is empty string or
unset,
+ * defaults to <code>ozone.administrators</code> value.</li>
+ * <li>The short username of the current user
+ * ({@link UserGroupInformation#getCurrentUser()}) is always added to the
+ * resulting set</li>
+ * </ul>
+ * @param conf An instance of {@link OzoneConfiguration} being used
+ * @return A {@link Set} of the S3 administrator users
+ */
+ public static Set<String> getS3AdminsFromConfig(OzoneConfiguration conf)
throws IOException {
+ Set<String> ozoneAdmins = new
HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS));
+
+ if (ozoneAdmins.isEmpty()) {
+ ozoneAdmins = new
HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS));
+ }
+
+ String omSPN = UserGroupInformation.getCurrentUser().getShortUserName();
+ ozoneAdmins.add(omSPN);
+
+ return ozoneAdmins;
+ }
+
+ /**
+ * Get the list of the groups that are a part of S3 administrators from
Ozone config.
+ * <p/>
+ * <strong>Note</strong>: If <code>ozone.s3.administrators.groups</code>
value is empty or unset,
+ * defaults to the <code>ozone.administrators.groups</code> value
+ *
+ * @param conf An instance of {@link OzoneConfiguration} being used
+ * @return A {@link Set} of the S3 administrator groups
+ */
+ public static Set<String> getS3AdminsGroupsFromConfig(OzoneConfiguration
conf) {
+ Set<String> s3AdminsGroup = new
HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS));
+
+ if (s3AdminsGroup.isEmpty() &&
conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) {
+ s3AdminsGroup = new
HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS));
+ }
+
+ return s3AdminsGroup;
+ }
+
+ /**
+ * Get the users and groups that are a part of S3 administrators.
+ * @param conf Stores an instance of {@link OzoneConfiguration} being used
+ * @return an instance of {@link OzoneAdmins} containing the S3 admin users
and groups
+ */
+ public static OzoneAdmins getS3Admins(OzoneConfiguration conf) {
+ Set<String> s3Admins;
+ try {
+ s3Admins = getS3AdminsFromConfig(conf);
+ } catch (IOException ie) {
+ s3Admins = Collections.emptySet();
+ }
+ Set<String> s3AdminGroups = getS3AdminsGroupsFromConfig(conf);
+
+ return new OzoneAdmins(s3Admins, s3AdminGroups);
+ }
+
+ /**
+ * Check if the provided user is an S3 administrator.
+ * @param user An instance of {@link UserGroupInformation} with information
about the user to verify
+ * @param s3Admins An instance of {@link OzoneAdmins} containing information
+ * of the S3 administrator users and groups in the system
+ * @return {@code true} if the provided user is an S3 administrator else
{@code false}
+ */
+ public static boolean isS3Admin(@Nullable UserGroupInformation user,
OzoneAdmins s3Admins) {
+ return null != user && s3Admins.isAdmin(user);
+ }
+
+ /**
+ * Check if the provided user is an S3 administrator.
+ * @param user An instance of {@link UserGroupInformation} with information
about the user to verify
+ * @param conf An instance of {@link OzoneConfiguration} being used
+ * @return {@code true} if the provided user is an S3 administrator else
{@code false}
+ */
+ public static boolean isS3Admin(@Nullable UserGroupInformation user,
OzoneConfiguration conf) {
+ OzoneAdmins s3Admins = getS3Admins(conf);
+ return isS3Admin(user, s3Admins);
+ }
}
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java
new file mode 100644
index 0000000000..47a90d05df
--- /dev/null
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.server;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * This class is to test the utilities present in the OzoneAdmins class.
+ */
+class TestOzoneAdmins {
+ // The following set of tests validates the S3-based utilities present
in OzoneAdmins
+ private OzoneConfiguration configuration;
+
+ @BeforeEach
+ void setUp() {
+ configuration = new OzoneConfiguration();
+ }
+
+ @ParameterizedTest
+ @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS,
+ OzoneConfigKeys.OZONE_ADMINISTRATORS})
+ void testS3AdminExtraction(String configKey) throws IOException {
+ configuration.set(configKey, "alice,bob");
+
+ assertThat(OzoneAdmins.getS3AdminsFromConfig(configuration))
+ .containsAll(Arrays.asList("alice", "bob"));
+ }
+
+ @ParameterizedTest
+ @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS,
+ OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS})
+ void testS3AdminGroupExtraction(String configKey) {
+ configuration.set(configKey, "test1, test2");
+
+ assertThat(OzoneAdmins.getS3AdminsGroupsFromConfig(configuration))
+ .containsAll(Arrays.asList("test1", "test2"));
+ }
+
+ @ParameterizedTest
+ @CsvSource({
+ OzoneConfigKeys.OZONE_ADMINISTRATORS + ", " +
OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS,
+ OzoneConfigKeys.OZONE_S3_ADMINISTRATORS + ", " +
OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS
+ })
+ void testIsAdmin(String adminKey, String adminGroupKey) {
+ // When there is no S3 admin, but Ozone admins present
+ configuration.set(adminKey, "alice");
+ configuration.set(adminGroupKey, "test_group");
+
+ OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration);
+ UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+ "alice", new String[] {"test_group"});
+
+ assertThat(admins.isAdmin(ugi)).isEqualTo(true);
+
+ // Test that when a user is present in an admin group but not an Ozone
Admin
+ UserGroupInformation ugiGroupOnly =
UserGroupInformation.createUserForTesting(
+ "bob", new String[] {"test_group"});
+ assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(true);
+ }
+
+ @ParameterizedTest
+ @ValueSource(booleans = {true, false})
+ void testIsAdminWithUgi(boolean isAdminSet) {
+ if (isAdminSet) {
+ configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice");
+ configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS,
"test_group");
+ }
+ OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration);
+ UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+ "alice", new String[] {"test_group"});
+ // Test that when a user is present in an admin group but not an Ozone
Admin
+ UserGroupInformation ugiGroupOnly =
UserGroupInformation.createUserForTesting(
+ "bob", new String[] {"test_group"});
+
+ assertThat(admins.isAdmin(ugi)).isEqualTo(isAdminSet);
+ assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(isAdminSet);
+ }
+
+ @ParameterizedTest
+ @ValueSource(booleans = {true, false})
+ void testIsS3AdminWithUgiAndConfiguration(boolean isAdminSet) {
+ if (isAdminSet) {
+ configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice");
+ configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS,
"test_group");
+ UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+ "alice", new String[] {"test_group"});
+ // Scenario when user is present in an admin group but not an Ozone Admin
+ UserGroupInformation ugiGroupOnly =
UserGroupInformation.createUserForTesting(
+ "bob", new String[] {"test_group"});
+
+ assertThat(OzoneAdmins.isS3Admin(ugi, configuration)).isEqualTo(true);
+ assertThat(OzoneAdmins.isS3Admin(ugiGroupOnly,
configuration)).isEqualTo(true);
+ } else {
+ assertThat(OzoneAdmins.isS3Admin(null, configuration)).isEqualTo(false);
+ }
+
+ }
+}
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
index 65d9e55900..744ada797e 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
@@ -18,12 +18,9 @@
package org.apache.hadoop.ozone.om.ha;
import io.grpc.Status;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.ConfigurationException;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -41,6 +38,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import io.grpc.StatusRuntimeException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,9 +58,10 @@ public class GrpcOMFailoverProxyProvider<T> extends
LoggerFactory.getLogger(GrpcOMFailoverProxyProvider.class);
public GrpcOMFailoverProxyProvider(ConfigurationSource configuration,
+ UserGroupInformation ugi,
String omServiceId,
Class<T> protocol) throws IOException {
- super(configuration, omServiceId, protocol);
+ super(configuration, ugi, omServiceId, protocol);
}
@Override
@@ -116,9 +115,7 @@ public class GrpcOMFailoverProxyProvider<T> extends
private T createOMProxy() throws IOException {
InetSocketAddress addr = new InetSocketAddress(0);
- Configuration hadoopConf =
- LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf());
- return (T) RPC.getProxy(getInterface(), 0, addr, hadoopConf);
+ return createOMProxy(addr);
}
/**
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
index 543d2e4aed..4447a72ab1 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
@@ -29,15 +29,9 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.ha.ConfUtils;
@@ -59,9 +53,7 @@ public class HadoopRpcOMFailoverProxyProvider<T> extends
public static final Logger LOG =
LoggerFactory.getLogger(HadoopRpcOMFailoverProxyProvider.class);
- private final long omVersion;
private final Text delegationTokenService;
- private final UserGroupInformation ugi;
private Map<String, OMProxyInfo> omProxyInfos;
private List<String> retryExceptions = new ArrayList<>();
@@ -75,9 +67,7 @@ public class HadoopRpcOMFailoverProxyProvider<T> extends
UserGroupInformation ugi,
String omServiceId,
Class<T> protocol) throws IOException {
- super(configuration, omServiceId, protocol);
- this.ugi = ugi;
- this.omVersion = RPC.getProtocolVersion(protocol);
+ super(configuration, ugi, omServiceId, protocol);
this.delegationTokenService = computeDelegationTokenService();
}
@@ -130,24 +120,6 @@ public class HadoopRpcOMFailoverProxyProvider<T> extends
setOmNodeAddressMap(omNodeAddressMap);
}
- private T createOMProxy(InetSocketAddress omAddress) throws IOException {
- Configuration hadoopConf =
- LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf());
- RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class);
-
- // FailoverOnNetworkException ensures that the IPC layer does not attempt
- // retries on the same OM in case of connection exception. This retry
- // policy essentially results in TRY_ONCE_THEN_FAIL.
- RetryPolicy connectionRetryPolicy = RetryPolicies
- .failoverOnNetworkException(0);
-
- return (T) RPC.getProtocolProxy(getInterface(), omVersion,
- omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory(
- hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(getConf()),
- connectionRetryPolicy).getProxy();
-
- }
-
/**
* Get the proxy object which should be used until the next failover event
* occurs. RPC proxy object is initialized lazily.
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
index 1a738b2ac8..5045a32bdc 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
@@ -21,17 +21,25 @@ package org.apache.hadoop.ozone.om.ha;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.protobuf.ServiceException;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException;
import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.ratis.protocol.exceptions.StateMachineException;
import org.slf4j.Logger;
@@ -85,13 +93,17 @@ public abstract class OMFailoverProxyProviderBase<T>
implements
private Set<String> accessControlExceptionOMs = new HashSet<>();
private boolean performFailoverDone;
+ private final UserGroupInformation ugi;
+
public OMFailoverProxyProviderBase(ConfigurationSource configuration,
+ UserGroupInformation ugi,
String omServiceId,
Class<T> protocol) throws IOException {
this.conf = configuration;
this.protocolClass = protocol;
this.performFailoverDone = true;
this.omServiceId = omServiceId;
+ this.ugi = ugi;
waitBetweenRetries = conf.getLong(
OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY,
@@ -112,6 +124,35 @@ public abstract class OMFailoverProxyProviderBase<T>
implements
String omSvcId)
throws IOException;
+ /**
+ * Get the protocol proxy for provided address.
+ * @param omAddress An instance of {@link InetSocketAddress} which contains
the address to connect
+ * @return the proxy connection to the address and the set of methods
supported by the server at the address
+ * @throws IOException if any error occurs while trying to get the proxy
+ */
+ protected T createOMProxy(InetSocketAddress omAddress) throws IOException {
+ Configuration hadoopConf =
+ LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf());
+
+ // TODO: Post upgrade to Protobuf 3.x we need to use ProtobufRpcEngine2
+ RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class);
+
+ // Ensure we do not attempt retry on the same OM in case of exceptions
+ RetryPolicy connectionRetryPolicy =
RetryPolicies.failoverOnNetworkException(0);
+
+ return (T) RPC.getProtocolProxy(
+ getInterface(),
+ RPC.getProtocolVersion(protocolClass),
+ omAddress,
+ ugi,
+ hadoopConf,
+ NetUtils.getDefaultSocketFactory(hadoopConf),
+ (int) OmUtils.getOMClientRpcTimeOut(getConf()),
+ connectionRetryPolicy
+ ).getProxy();
+ }
+
+
protected synchronized boolean shouldFailover(Exception ex) {
Throwable unwrappedException = HddsUtils.getUnwrappedException(ex);
if (unwrappedException instanceof AccessControlException ||
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index ac2e85da84..c9eb9cbb44 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -121,6 +121,7 @@ public class GrpcOmTransport implements OmTransport {
omFailoverProxyProvider = new GrpcOMFailoverProxyProvider(
conf,
+ ugi,
omServiceId,
OzoneManagerProtocolPB.class);
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index 39d26c362f..026dfa1edc 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -96,7 +96,7 @@ services:
- 9878:9878
env_file:
- ./docker-config
- command: ["/opt/hadoop/bin/ozone","s3g"]
+ command: ["/opt/hadoop/bin/ozone","s3g",
"-Dozone.om.transport.class=${OZONE_S3_OM_TRANSPORT:-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory}"]
environment:
OZONE_OPTS:
recon:
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh
b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh
index 644e45c4d5..a9e87a60cd 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh
@@ -25,6 +25,7 @@ source "$COMPOSE_DIR/../testlib.sh"
export SECURITY_ENABLED=true
export COMPOSE_FILE=docker-compose.yaml:fcq.yaml
+export
OZONE_S3_OM_TRANSPORT="org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory"
start_docker_env
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
index e9b5dd5df7..e0c2fc7f81 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
@@ -45,15 +45,19 @@ S3 Gateway Secret Already Exists
Should contain ${result} HTTP/1.1 400
S3_SECRET_ALREADY_EXISTS ignore_case=True
S3 Gateway Generate Secret By Username
- [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
${result} = Execute curl -X PUT
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
Should contain ${result} HTTP/1.1 200
OK ignore_case=True
Should Match Regexp ${result}
<awsAccessKey>.*</awsAccessKey><awsSecret>.*</awsSecret>
S3 Gateway Generate Secret By Username For Other User
- [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
${result} = Execute curl -X PUT
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
Should contain ${result} HTTP/1.1 200
OK ignore_case=True
Should Match Regexp ${result}
<awsAccessKey>.*</awsAccessKey><awsSecret>.*</awsSecret>
+
+S3 Gateway Reject Secret Generation By Non-admin User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
+ Run Keyword Kinit test user
testuser2 testuser2.keytab
+ ${result} = Execute curl -X PUT
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 403
FORBIDDEN ignore_case=True
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
index 59725c0416..ffb03a85a8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
@@ -38,15 +38,19 @@ S3 Gateway Revoke Secret
Should contain ${result} HTTP/1.1 200 OK
ignore_case=True
S3 Gateway Revoke Secret By Username
- [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
Execute ozone s3 getsecret
-u testuser ${OM_HA_PARAM}
${result} = Execute curl -X DELETE
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
Should contain ${result} HTTP/1.1 200 OK
ignore_case=True
S3 Gateway Revoke Secret By Username For Other User
- [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
Execute ozone s3 getsecret
-u testuser2 ${OM_HA_PARAM}
${result} = Execute curl -X DELETE
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
Should contain ${result} HTTP/1.1 200 OK
ignore_case=True
+
+S3 Gateway Reject Secret Revoke By Non-admin User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this
check as security is not enabled
+ Run Keyword Kinit test user
testuser2 testuser2.keytab
+ ${result} = Execute curl -X DELETE
--negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 403
FORBIDDEN ignore_case=True
\ No newline at end of file
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java
index c09c5b91af..cad987bb7d 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java
@@ -19,21 +19,11 @@ package org.apache.hadoop.ozone.om;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.util.Collection;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS;
-
/**
* Utility class for ozone configurations.
*/
@@ -43,38 +33,6 @@ public final class OzoneConfigUtil {
private OzoneConfigUtil() {
}
- /**
- * Return list of s3 administrators prop from config.
- *
- * If ozone.s3.administrators value is empty string or unset,
- * defaults to ozone.administrators value.
- */
- static Collection<String> getS3AdminsFromConfig(OzoneConfiguration conf)
- throws IOException {
- Collection<String> ozAdmins =
- conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS);
- if (ozAdmins == null || ozAdmins.isEmpty()) {
- ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS);
- }
- String omSPN = UserGroupInformation.getCurrentUser().getShortUserName();
- if (!ozAdmins.contains(omSPN)) {
- ozAdmins.add(omSPN);
- }
- return ozAdmins;
- }
-
- static Collection<String> getS3AdminsGroupsFromConfig(
- OzoneConfiguration conf) {
- Collection<String> s3AdminsGroup =
- conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS);
- if (s3AdminsGroup.isEmpty() && conf
- .getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) {
- s3AdminsGroup = conf
- .getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS);
- }
- return s3AdminsGroup;
- }
-
public static ReplicationConfig resolveReplicationConfigPreference(
HddsProtos.ReplicationType clientType,
HddsProtos.ReplicationFactor clientFactor,
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 8cb081028f..eacd38375f 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -695,11 +695,7 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
// Get read only admin list
readOnlyAdmins = OzoneAdmins.getReadonlyAdmins(conf);
- Collection<String> s3AdminUsernames =
- OzoneConfigUtil.getS3AdminsFromConfig(configuration);
- Collection<String> s3AdminGroups =
- OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration);
- s3OzoneAdmins = new OzoneAdmins(s3AdminUsernames, s3AdminGroups);
+ s3OzoneAdmins = OzoneAdmins.getS3Admins(conf);
instantiateServices(false);
// Create special volume s3v which is required for S3G.
@@ -4345,7 +4341,7 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
}
public boolean isS3Admin(UserGroupInformation callerUgi) {
- return callerUgi != null && s3OzoneAdmins.isAdmin(callerUgi);
+ return OzoneAdmins.isS3Admin(callerUgi, s3OzoneAdmins);
}
@VisibleForTesting
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java
index 0bd99d4949..41d6c28e2b 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java
@@ -20,16 +20,10 @@ import
org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import java.io.IOException;
-import java.util.Arrays;
-
-import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -119,42 +113,4 @@ public class TestOzoneConfigUtil {
// should return ratis.
assertEquals(ratisReplicationConfig, replicationConfig);
}
-
- @Test
- public void testS3AdminExtraction() throws IOException {
- OzoneConfiguration configuration = new OzoneConfiguration();
- configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice,bob");
-
- assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration))
- .containsAll(Arrays.asList("alice", "bob"));
- }
-
- @Test
- public void testS3AdminExtractionWithFallback() throws IOException {
- OzoneConfiguration configuration = new OzoneConfiguration();
- configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice,bob");
-
- assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration))
- .containsAll(Arrays.asList("alice", "bob"));
- }
-
- @Test
- public void testS3AdminGroupExtraction() {
- OzoneConfiguration configuration = new OzoneConfiguration();
- configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS,
- "test1, test2");
-
- assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration))
- .containsAll(Arrays.asList("test1", "test2"));
- }
-
- @Test
- public void testS3AdminGroupExtractionWithFallback() {
- OzoneConfiguration configuration = new OzoneConfiguration();
- configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS,
- "test1, test2");
-
- assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration))
- .containsAll(Arrays.asList("test1", "test2"));
- }
}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java
new file mode 100644
index 0000000000..b5c7b242cb
--- /dev/null
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.s3secret;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import javax.ws.rs.NameBinding;
+
+/**
+ * Annotation to only allow admin users to access the endpoint.
+ */
+@NameBinding
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface S3AdminEndpoint {
+}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java
new file mode 100644
index 0000000000..5ecdfa7c12
--- /dev/null
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.s3secret;
+
+
+import javax.inject.Inject;
+import javax.ws.rs.container.ContainerRequestContext;
+import javax.ws.rs.container.ContainerRequestFilter;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+import javax.ws.rs.ext.Provider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.server.OzoneAdmins;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.security.Principal;
+
+/**
+ * Filter that only allows admins to access endpoints annotated with {@link
S3AdminEndpoint}.
+ * The condition is based on the values of the configuration keys:
+ * <ul>
+ * <li>ozone.administrators</li>
+ * <li>ozone.administrators.groups</li>
+ * </ul>
+ */
+@S3AdminEndpoint
+@Provider
+public class S3SecretAdminFilter implements ContainerRequestFilter {
+
+ @Inject
+ private OzoneConfiguration conf;
+
+ @Override
+ public void filter(ContainerRequestContext requestContext) throws
IOException {
+ final Principal userPrincipal =
requestContext.getSecurityContext().getUserPrincipal();
+ if (null != userPrincipal) {
+ UserGroupInformation user =
UserGroupInformation.createRemoteUser(userPrincipal.getName());
+ if (!OzoneAdmins.isS3Admin(user, conf)) {
+ requestContext.abortWith(Response.status(Status.FORBIDDEN).build());
+ }
+ }
+ }
+}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
index 4ea17d2a2f..739dadfb28 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
@@ -33,7 +33,6 @@ import javax.ws.rs.core.Response;
import java.io.IOException;
import static javax.ws.rs.core.Response.Status.BAD_REQUEST;
-import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED;
import static javax.ws.rs.core.Response.Status.NOT_FOUND;
/**
@@ -41,6 +40,7 @@ import static javax.ws.rs.core.Response.Status.NOT_FOUND;
*/
@Path("/secret")
@S3SecretEnabled
+@S3AdminEndpoint
public class S3SecretManagementEndpoint extends S3SecretEndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(S3SecretManagementEndpoint.class);
@@ -54,8 +54,7 @@ public class S3SecretManagementEndpoint extends
S3SecretEndpointBase {
@Path("/{username}")
public Response generate(@PathParam("username") String username)
throws IOException {
- // TODO: It is a temporary solution. To be removed after HDDS-11041 is
done.
- return Response.status(METHOD_NOT_ALLOWED).build();
+ return generateInternal(username);
}
private Response generateInternal(@Nullable String username) throws
IOException {
@@ -95,8 +94,7 @@ public class S3SecretManagementEndpoint extends
S3SecretEndpointBase {
@Path("/{username}")
public Response revoke(@PathParam("username") String username)
throws IOException {
- // TODO: It is a temporary solution. To be removed after HDDS-11041 is
done.
- return Response.status(METHOD_NOT_ALLOWED).build();
+ return revokeInternal(username);
}
private Response revokeInternal(@Nullable String username)
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
index d1f81faddd..b548d17d9f 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -93,7 +92,7 @@ class TestSecretGenerate {
hasNoSecretYet();
S3SecretResponse response =
- (S3SecretResponse) endpoint.generate().getEntity();
+ (S3SecretResponse) endpoint.generate().getEntity();
assertEquals(USER_SECRET, response.getAwsSecret());
assertEquals(USER_NAME, response.getAwsAccessKey());
@@ -112,12 +111,11 @@ class TestSecretGenerate {
}
@Test
- @Unhealthy("HDDS-11041")
void testSecretGenerateWithUsername() throws IOException {
hasNoSecretYet();
S3SecretResponse response =
- (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity();
+ (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity();
assertEquals(USER_SECRET, response.getAwsSecret());
assertEquals(OTHER_USER_NAME, response.getAwsAccessKey());
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
index 85e6bd4c10..b26df0e899 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.ObjectStoreStub;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -98,7 +97,6 @@ public class TestSecretRevoke {
}
@Test
- @Unhealthy("HDDS-11041")
void testSecretRevokeWithUsername() throws IOException {
endpoint.revoke(OTHER_USER_NAME);
verify(objectStore, times(1))
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]