This is an automated email from the ASF dual-hosted git repository.
mchades pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git
The following commit(s) were added to refs/heads/main by this push:
new fee9afaea [#4247] improvement(docs, catalog-hadoop): Add docs about
multiple-level authentication for Hadoop catalog. (#4248)
fee9afaea is described below
commit fee9afaea6bb1f42dc07cb68dd135d4a6a496772
Author: Qi Yu <[email protected]>
AuthorDate: Wed Jul 31 19:21:30 2024 +0800
[#4247] improvement(docs, catalog-hadoop): Add docs about multiple-level
authentication for Hadoop catalog. (#4248)
### What changes were proposed in this pull request?
Add some details about multiple-level authentication for the Hadoop
catalog.
### Why are the changes needed?
To better serve our customers.
Fix: #4247
### Does this PR introduce _any_ user-facing change?
N/A.
### How was this patch tested?
N/A.
---
.../catalog/hadoop/HadoopProxyPlugin.java | 88 -------
.../hadoop/SecureHadoopCatalogOperations.java | 268 +++------------------
.../authentication/AuthenticationConfig.java | 6 +-
.../hadoop/authentication/KerberosUserContext.java | 119 +++++++++
.../hadoop/authentication/SimpleUserContext.java | 52 ++++
.../catalog/hadoop/authentication/UserContext.java | 176 ++++++++++++++
.../test/HadoopUserAuthenticationIT.java | 100 ++++++++
docs/gravitino-server-config.md | 15 +-
docs/hadoop-catalog.md | 31 ++-
docs/lakehouse-iceberg-catalog.md | 1 +
10 files changed, 522 insertions(+), 334 deletions(-)
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopProxyPlugin.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopProxyPlugin.java
deleted file mode 100644
index 4421177bd..000000000
---
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopProxyPlugin.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.gravitino.catalog.hadoop;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.Principal;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Map;
-import org.apache.gravitino.connector.CatalogOperations;
-import org.apache.gravitino.connector.ProxyPlugin;
-import org.apache.gravitino.utils.Executable;
-import org.apache.hadoop.security.UserGroupInformation;
-
-@Deprecated
-public class HadoopProxyPlugin implements ProxyPlugin {
- private SecureHadoopCatalogOperations ops;
- private final UserGroupInformation realUser;
-
- public HadoopProxyPlugin() {
- try {
- realUser = UserGroupInformation.getCurrentUser();
- } catch (IOException ioe) {
- throw new IllegalStateException("Fail to init HadoopCatalogProxyPlugin");
- }
- }
-
- @Override
- public Object doAs(
- Principal principal, Executable<Object, Exception> action, Map<String,
String> properties)
- throws Throwable {
- try {
- UserGroupInformation proxyUser;
-
- if (UserGroupInformation.isSecurityEnabled() && ops != null) {
- // The Gravitino server may use multiple KDC servers.
- // The http authentication use one KDC server, the Hadoop catalog may
use another KDC
- // server.
- // The KerberosAuthenticator will remove realm of principal.
- // And then we add the realm of Hadoop catalog to the user.
- String proxyKerberosPrincipalName = principal.getName();
- if (!proxyKerberosPrincipalName.contains("@")) {
- proxyKerberosPrincipalName =
- String.format("%s@%s", proxyKerberosPrincipalName,
ops.getKerberosRealm());
- }
-
- proxyUser =
UserGroupInformation.createProxyUser(proxyKerberosPrincipalName, realUser);
- } else {
- proxyUser = UserGroupInformation.createProxyUser(principal.getName(),
realUser);
- }
-
- return proxyUser.doAs((PrivilegedExceptionAction<Object>)
action::execute);
- } catch (UndeclaredThrowableException e) {
- Throwable innerException = e.getCause();
- if (innerException instanceof PrivilegedActionException) {
- throw innerException.getCause();
- } else if (innerException instanceof InvocationTargetException) {
- throw innerException.getCause();
- } else {
- throw innerException;
- }
- }
- }
-
- @Override
- public void bindCatalogOperation(CatalogOperations ops) {
- this.ops = ((SecureHadoopCatalogOperations) ops);
- }
-}
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/SecureHadoopCatalogOperations.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/SecureHadoopCatalogOperations.java
index 51b7069bd..1952cbd25 100644
---
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/SecureHadoopCatalogOperations.java
+++
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/SecureHadoopCatalogOperations.java
@@ -19,21 +19,9 @@
package org.apache.gravitino.catalog.hadoop;
-import static
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import java.io.Closeable;
-import java.io.File;
import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import javax.security.auth.Subject;
@@ -45,9 +33,7 @@ import org.apache.gravitino.Namespace;
import org.apache.gravitino.Schema;
import org.apache.gravitino.SchemaChange;
import org.apache.gravitino.UserPrincipal;
-import org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig;
-import
org.apache.gravitino.catalog.hadoop.authentication.kerberos.KerberosClient;
-import
org.apache.gravitino.catalog.hadoop.authentication.kerberos.KerberosConfig;
+import org.apache.gravitino.catalog.hadoop.authentication.UserContext;
import org.apache.gravitino.connector.CatalogInfo;
import org.apache.gravitino.connector.CatalogOperations;
import org.apache.gravitino.connector.HasPropertyMetadata;
@@ -65,9 +51,6 @@ import org.apache.gravitino.file.FilesetChange;
import org.apache.gravitino.meta.FilesetEntity;
import org.apache.gravitino.meta.SchemaEntity;
import org.apache.gravitino.utils.PrincipalUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -79,13 +62,9 @@ public class SecureHadoopCatalogOperations
private final HadoopCatalogOperations hadoopCatalogOperations;
- private final List<Closeable> closeables = Lists.newArrayList();
-
- private final Map<NameIdentifier, UserInfo> userInfoMap =
Maps.newConcurrentMap();
-
public static final String GRAVITINO_KEYTAB_FORMAT = "keytabs/gravitino-%s";
- private String kerberosRealm;
+ private UserContext catalogUserContext;
public SecureHadoopCatalogOperations() {
this.hadoopCatalogOperations = new HadoopCatalogOperations();
@@ -100,33 +79,6 @@ public class SecureHadoopCatalogOperations
return hadoopCatalogOperations;
}
- public String getKerberosRealm() {
- return kerberosRealm;
- }
-
- static class UserInfo {
- UserGroupInformation loginUser;
- boolean enableUserImpersonation;
- String keytabPath;
- String realm;
-
- static UserInfo of(
- UserGroupInformation loginUser,
- boolean enableUserImpersonation,
- String keytabPath,
- String kerberosRealm) {
- UserInfo userInfo = new UserInfo();
- userInfo.loginUser = loginUser;
- userInfo.enableUserImpersonation = enableUserImpersonation;
- userInfo.keytabPath = keytabPath;
- userInfo.realm = kerberosRealm;
- return userInfo;
- }
- }
-
- // We have overridden the createFileset, dropFileset, createSchema,
dropSchema method to reset
- // the current user based on the name identifier.
-
@Override
public Fileset createFileset(
NameIdentifier ident,
@@ -135,10 +87,12 @@ public class SecureHadoopCatalogOperations
String storageLocation,
Map<String, String> properties)
throws NoSuchSchemaException, FilesetAlreadyExistsException {
- UserGroupInformation currentUser = getUGIByIdent(properties, ident);
String apiUser = PrincipalUtils.getCurrentUserName();
- return doAs(
- currentUser,
+
+ UserContext userContext =
+ UserContext.getUserContext(
+ ident, properties, null, hadoopCatalogOperations.getCatalogInfo());
+ return userContext.doAs(
() -> {
setUser(apiUser);
return hadoopCatalogOperations.createFileset(
@@ -162,23 +116,22 @@ public class SecureHadoopCatalogOperations
throw new RuntimeException("Failed to delete fileset " + ident, ioe);
}
- // Reset the current user based on the name identifier.
- UserGroupInformation currentUser =
getUGIByIdent(filesetEntity.properties(), ident);
-
- boolean r = doAs(currentUser, () ->
hadoopCatalogOperations.dropFileset(ident), ident);
- cleanUserInfo(ident);
+ UserContext userContext =
+ UserContext.getUserContext(
+ ident, filesetEntity.properties(), null,
hadoopCatalogOperations.getCatalogInfo());
+ boolean r = userContext.doAs(() ->
hadoopCatalogOperations.dropFileset(ident), ident);
+ UserContext.clearUserContext(ident);
return r;
}
@Override
public Schema createSchema(NameIdentifier ident, String comment, Map<String,
String> properties)
throws NoSuchCatalogException, SchemaAlreadyExistsException {
- // Reset the current user based on the name identifier and properties.
- UserGroupInformation currentUser = getUGIByIdent(properties, ident);
String apiUser = PrincipalUtils.getCurrentUserName();
-
- return doAs(
- currentUser,
+ UserContext userContext =
+ UserContext.getUserContext(
+ ident, properties, null, hadoopCatalogOperations.getCatalogInfo());
+ return userContext.doAs(
() -> {
setUser(apiUser);
return hadoopCatalogOperations.createSchema(ident, comment,
properties);
@@ -196,11 +149,12 @@ public class SecureHadoopCatalogOperations
Map<String, String> properties =
Optional.ofNullable(schemaEntity.properties()).orElse(Collections.emptyMap());
- // Reset the current user based on the name identifier.
- UserGroupInformation user = getUGIByIdent(properties, ident);
+ UserContext userContext =
+ UserContext.getUserContext(
+ ident, properties, null,
hadoopCatalogOperations.getCatalogInfo());
+ boolean r = userContext.doAs(() ->
hadoopCatalogOperations.dropSchema(ident, cascade), ident);
+ UserContext.clearUserContext(ident);
- boolean r = doAs(user, () -> hadoopCatalogOperations.dropSchema(ident,
cascade), ident);
- cleanUserInfo(ident);
return r;
} catch (IOException ioe) {
throw new RuntimeException("Failed to delete schema " + ident, ioe);
@@ -212,7 +166,12 @@ public class SecureHadoopCatalogOperations
Map<String, String> config, CatalogInfo info, HasPropertyMetadata
propertiesMetadata)
throws RuntimeException {
hadoopCatalogOperations.initialize(config, info, propertiesMetadata);
- initAuthentication(hadoopCatalogOperations.getConf(),
hadoopCatalogOperations.getHadoopConf());
+ catalogUserContext =
+ UserContext.getUserContext(
+ NameIdentifier.of(info.namespace(), info.name()),
+ config,
+ hadoopCatalogOperations.getHadoopConf(),
+ info);
}
@Override
@@ -227,10 +186,7 @@ public class SecureHadoopCatalogOperations
}
}
if (!ident.name().equals(finalName)) {
- UserInfo userInfo = userInfoMap.remove(ident);
- if (userInfo != null) {
- userInfoMap.put(NameIdentifier.of(ident.namespace(), finalName),
userInfo);
- }
+ UserContext.clearUserContext(NameIdentifier.of(ident.namespace(),
finalName));
}
return fileset;
@@ -266,15 +222,9 @@ public class SecureHadoopCatalogOperations
public void close() throws IOException {
hadoopCatalogOperations.close();
- userInfoMap.clear();
- closeables.forEach(
- c -> {
- try {
- c.close();
- } catch (Exception e) {
- LOG.error("Failed to close resource", e);
- }
- });
+ catalogUserContext.close();
+
+ UserContext.cleanAllUserContext();
}
@Override
@@ -288,162 +238,6 @@ public class SecureHadoopCatalogOperations
hadoopCatalogOperations.testConnection(catalogIdent, type, provider,
comment, properties);
}
- private void initAuthentication(Map<String, String> conf, Configuration
hadoopConf) {
- AuthenticationConfig config = new AuthenticationConfig(conf);
- CatalogInfo catalogInfo = hadoopCatalogOperations.getCatalogInfo();
- if (config.isKerberosAuth()) {
- initKerberos(
- conf, hadoopConf, NameIdentifier.of(catalogInfo.namespace(),
catalogInfo.name()), true);
- } else if (config.isSimpleAuth()) {
- try {
- // Use service login user.
- UserGroupInformation u = UserGroupInformation.getCurrentUser();
- userInfoMap.put(
- NameIdentifier.of(catalogInfo.namespace(), catalogInfo.name()),
- UserInfo.of(u, config.isImpersonationEnabled(), null, null));
- } catch (Exception e) {
- throw new RuntimeException("Can't get service user for Hadoop
catalog", e);
- }
- }
- }
-
- /**
- * Get the UserGroupInformation based on the NameIdentifier and properties.
- *
- * <p>Note: As UserGroupInformation is a static class, to avoid the thread
safety issue, we need
- * to use synchronized to ensure the thread safety: Make login and
getLoginUser atomic.
- */
- public synchronized String initKerberos(
- Map<String, String> properties,
- Configuration configuration,
- NameIdentifier ident,
- boolean refreshCredentials) {
- // Init schema level kerberos authentication.
-
- CatalogInfo catalogInfo = hadoopCatalogOperations.getCatalogInfo();
- String keytabPath =
- String.format(
- GRAVITINO_KEYTAB_FORMAT, catalogInfo.id() + "-" +
ident.toString().replace(".", "-"));
- KerberosConfig kerberosConfig = new KerberosConfig(properties);
- if (kerberosConfig.isKerberosAuth()) {
- configuration.set(
- HADOOP_SECURITY_AUTHENTICATION,
- AuthenticationMethod.KERBEROS.name().toLowerCase(Locale.ROOT));
- try {
- UserGroupInformation.setConfiguration(configuration);
- KerberosClient kerberosClient =
- new KerberosClient(properties, configuration, refreshCredentials);
- // Add the kerberos client to the closable to close resources.
- closeables.add(kerberosClient);
-
- File keytabFile = kerberosClient.saveKeyTabFileFromUri(keytabPath);
- String kerberosRealm =
kerberosClient.login(keytabFile.getAbsolutePath());
- // Should this kerberosRealm need to be equals to the realm in the
principal?
- userInfoMap.put(
- ident,
- UserInfo.of(
- UserGroupInformation.getLoginUser(),
- kerberosConfig.isImpersonationEnabled(),
- keytabPath,
- kerberosRealm));
- return kerberosRealm;
- } catch (IOException e) {
- throw new RuntimeException("Failed to login with Kerberos", e);
- }
- }
-
- return null;
- }
-
- private UserGroupInformation getUGIByIdent(Map<String, String> properties,
NameIdentifier ident) {
- KerberosConfig kerberosConfig = new KerberosConfig(properties);
- if (kerberosConfig.isKerberosAuth()) {
- // We assume that the realm of catalog is the same as the realm of the
schema and table.
- initKerberos(properties, new Configuration(), ident, false);
- }
- // If the kerberos is not enabled (simple mode), we will use the current
user
- return getUserBaseOnNameIdentifier(ident);
- }
-
- private UserGroupInformation getUserBaseOnNameIdentifier(NameIdentifier
nameIdentifier) {
- UserInfo userInfo = getNearestUserGroupInformation(nameIdentifier);
- if (userInfo == null) {
- throw new RuntimeException("Failed to get user information for " +
nameIdentifier);
- }
-
- UserGroupInformation ugi = userInfo.loginUser;
- boolean userImpersonation = userInfo.enableUserImpersonation;
- if (userImpersonation) {
- String proxyKerberosPrincipalName = PrincipalUtils.getCurrentUserName();
- if (!proxyKerberosPrincipalName.contains("@")) {
- proxyKerberosPrincipalName =
- String.format("%s@%s", proxyKerberosPrincipalName, userInfo.realm);
- }
-
- ugi = UserGroupInformation.createProxyUser(proxyKerberosPrincipalName,
ugi);
- }
-
- return ugi;
- }
-
- private UserInfo getNearestUserGroupInformation(NameIdentifier
nameIdentifier) {
- NameIdentifier currentNameIdentifier = nameIdentifier;
- while (currentNameIdentifier != null) {
- if (userInfoMap.containsKey(currentNameIdentifier)) {
- return userInfoMap.get(currentNameIdentifier);
- }
-
- String[] levels = currentNameIdentifier.namespace().levels();
- // The ident is catalog level.
- if (levels.length <= 1) {
- return null;
- }
- currentNameIdentifier =
NameIdentifier.of(currentNameIdentifier.namespace().levels());
- }
- return null;
- }
-
- private void cleanUserInfo(NameIdentifier identifier) {
- UserInfo userInfo = userInfoMap.remove(identifier);
- if (userInfo != null) {
- removeFile(userInfo.keytabPath);
- }
- }
-
- private void removeFile(String filePath) {
- if (filePath == null) {
- return;
- }
-
- File file = new File(filePath);
- if (file.exists()) {
- boolean isDeleted = file.delete();
- if (!isDeleted) {
- LOG.error("Failed to delete file: {}", filePath);
- }
- }
- }
-
- private <T> T doAs(
- UserGroupInformation userGroupInformation,
- PrivilegedExceptionAction<T> action,
- NameIdentifier ident) {
- try {
- return userGroupInformation.doAs(action);
- } catch (IOException | InterruptedException ioe) {
- throw new RuntimeException("Failed to operation on fileset " + ident,
ioe);
- } catch (UndeclaredThrowableException e) {
- Throwable innerException = e.getCause();
- if (innerException instanceof PrivilegedActionException) {
- throw new RuntimeException(innerException.getCause());
- } else if (innerException instanceof InvocationTargetException) {
- throw new RuntimeException(innerException.getCause());
- } else {
- throw new RuntimeException(innerException);
- }
- }
- }
-
/**
* Add the user to the subject so that we can get the last user in the
subject. Hadoop catalog
* uses this method to pass api user from the client side, so that we can
get the user in the
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/AuthenticationConfig.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/AuthenticationConfig.java
index 384716ce1..ac69f406a 100644
---
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/AuthenticationConfig.java
+++
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/AuthenticationConfig.java
@@ -36,7 +36,11 @@ public class AuthenticationConfig extends Config {
enum AuthenticationType {
SIMPLE,
- KERBEROS
+ KERBEROS;
+
+ public static AuthenticationType fromString(String type) {
+ return AuthenticationType.valueOf(type.toUpperCase());
+ }
}
public static final boolean KERBEROS_DEFAULT_IMPERSONATION_ENABLE = false;
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/KerberosUserContext.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/KerberosUserContext.java
new file mode 100644
index 000000000..e6bffe984
--- /dev/null
+++
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/KerberosUserContext.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.catalog.hadoop.authentication;
+
+import static
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+import
org.apache.gravitino.catalog.hadoop.authentication.kerberos.KerberosClient;
+import org.apache.gravitino.utils.PrincipalUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KerberosUserContext extends UserContext {
+ public static final Logger LOGGER =
LoggerFactory.getLogger(KerberosUserContext.class);
+
+ private UserGroupInformation userGroupInformation;
+ private boolean enableUserImpersonation;
+ private String kerberosRealm;
+ private final String keytab;
+
+ private KerberosClient kerberosClient;
+
+ KerberosUserContext(boolean enableUserImpersonation, String keytabPath) {
+ this.enableUserImpersonation = enableUserImpersonation;
+ this.keytab = keytabPath;
+ }
+
+ public void setEnableUserImpersonation(boolean enableUserImpersonation) {
+ this.enableUserImpersonation = enableUserImpersonation;
+ }
+
+ public synchronized void initKerberos(
+ Map<String, String> properties, Configuration configuration, boolean
refreshCredentials) {
+ configuration.set(
+ HADOOP_SECURITY_AUTHENTICATION,
+ AuthenticationMethod.KERBEROS.name().toLowerCase(Locale.ROOT));
+ try {
+ UserGroupInformation.setConfiguration(configuration);
+ KerberosClient client = new KerberosClient(properties, configuration,
refreshCredentials);
+ // Keep a reference to the Kerberos client so its resources can be closed later.
+ this.kerberosClient = client;
+ File keytabFile = client.saveKeyTabFileFromUri(keytab);
+ this.kerberosRealm = client.login(keytabFile.getAbsolutePath());
+ this.userGroupInformation = UserGroupInformation.getLoginUser();
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to login with Kerberos", e);
+ }
+ }
+
+ @Override
+ UserGroupInformation createProxyUser() {
+ String proxyKerberosPrincipalName = PrincipalUtils.getCurrentUserName();
+ if (!proxyKerberosPrincipalName.contains("@")) {
+ proxyKerberosPrincipalName =
+ String.format("%s@%s", proxyKerberosPrincipalName, kerberosRealm);
+ }
+
+ return UserGroupInformation.createProxyUser(proxyKerberosPrincipalName,
getUser());
+ }
+
+ @Override
+ UserGroupInformation getUser() {
+ return userGroupInformation;
+ }
+
+ @Override
+ boolean enableUserImpersonation() {
+ return enableUserImpersonation;
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (kerberosClient != null) {
+ kerberosClient.close();
+ }
+
+ if (keytab == null) {
+ return;
+ }
+
+ File file = new File(keytab);
+ if (file.exists()) {
+ boolean isDeleted = file.delete();
+ if (!isDeleted) {
+ LOGGER.error("Failed to delete file: {}", keytab);
+ }
+ }
+ }
+
+ public KerberosUserContext deepCopy() {
+ KerberosUserContext copy = new
KerberosUserContext(enableUserImpersonation, keytab);
+ copy.userGroupInformation = userGroupInformation;
+ copy.kerberosRealm = kerberosRealm;
+ return copy;
+ }
+}
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/SimpleUserContext.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/SimpleUserContext.java
new file mode 100644
index 000000000..7176f4055
--- /dev/null
+++
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/SimpleUserContext.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.catalog.hadoop.authentication;
+
+import java.io.IOException;
+import org.apache.gravitino.utils.PrincipalUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public class SimpleUserContext extends UserContext {
+ private final UserGroupInformation userGroupInformation;
+ private final boolean enableUserImpersonation;
+
+ SimpleUserContext(UserGroupInformation userGroupInformation, boolean
enableUserImpersonation) {
+ this.userGroupInformation = userGroupInformation;
+ this.enableUserImpersonation = enableUserImpersonation;
+ }
+
+ @Override
+ UserGroupInformation getUser() {
+ return userGroupInformation;
+ }
+
+ @Override
+ boolean enableUserImpersonation() {
+ return enableUserImpersonation;
+ }
+
+ @Override
+ public void close() throws IOException {}
+
+ @Override
+ UserGroupInformation createProxyUser() {
+ return
UserGroupInformation.createProxyUser(PrincipalUtils.getCurrentUserName(),
getUser());
+ }
+}
diff --git
a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/UserContext.java
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/UserContext.java
new file mode 100644
index 000000000..1023be45f
--- /dev/null
+++
b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/authentication/UserContext.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.catalog.hadoop.authentication;
+
+import static
org.apache.gravitino.catalog.hadoop.SecureHadoopCatalogOperations.GRAVITINO_KEYTAB_FORMAT;
+import static
org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig.AUTH_TYPE_ENTRY;
+import static
org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig.ENABLE_IMPERSONATION_ENTRY;
+import static
org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig.IMPERSONATION_ENABLE_KEY;
+
+import com.google.common.collect.Maps;
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Map;
+import org.apache.gravitino.NameIdentifier;
+import
org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig.AuthenticationType;
+import org.apache.gravitino.connector.CatalogInfo;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public abstract class UserContext implements Closeable {
+
+ private static final Map<NameIdentifier, UserContext> userContextMap =
Maps.newConcurrentMap();
+
+ private static void addUserContext(NameIdentifier nameIdentifier,
UserContext userContext) {
+ userContextMap.put(nameIdentifier, userContext);
+ }
+
+ public static void clearUserContext(NameIdentifier nameIdentifier) {
+ UserContext userContext = userContextMap.remove(nameIdentifier);
+ if (userContext != null) {
+ try {
+ userContext.close();
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to close user context", e);
+ }
+ }
+ }
+
+ public static void cleanAllUserContext() {
+ userContextMap.keySet().forEach(UserContext::clearUserContext);
+ userContextMap.clear();
+ }
+
+ public static UserContext getUserContext(
+ NameIdentifier nameIdentifier,
+ Map<String, String> properties,
+ Configuration configuration,
+ CatalogInfo catalogInfo) {
+ // Try to get the parent user context.
+ NameIdentifier currentNameIdentifier =
NameIdentifier.of(nameIdentifier.namespace().levels());
+ UserContext parentContext = null;
+ while (!currentNameIdentifier.namespace().isEmpty()) {
+ if (userContextMap.containsKey(currentNameIdentifier)) {
+ parentContext = userContextMap.get(currentNameIdentifier);
+ break;
+ }
+ currentNameIdentifier =
NameIdentifier.of(currentNameIdentifier.namespace().levels());
+ }
+
+ if (configuration == null) {
+ configuration = new Configuration();
+ }
+ AuthenticationConfig authenticationConfig = new
AuthenticationConfig(properties);
+
+ // If impersonation is not set explicitly, inherit the setting from the parent context.
+ boolean enableUserImpersonation =
ENABLE_IMPERSONATION_ENTRY.getDefaultValue();
+ if (properties.containsKey(IMPERSONATION_ENABLE_KEY)) {
+ enableUserImpersonation = authenticationConfig.isImpersonationEnabled();
+ } else if (parentContext != null) {
+ enableUserImpersonation = parentContext.enableUserImpersonation();
+ }
+
+ AuthenticationType authenticationType =
+ AuthenticationType.fromString(AUTH_TYPE_ENTRY.getDefaultValue());
+ // If we do not set the authentication type explicitly, we will use the
parent context. If the
+ // parent is null, then we will use the default value.
+ if (properties.containsKey(AuthenticationConfig.AUTH_TYPE_KEY)) {
+ authenticationType =
+ authenticationConfig.isSimpleAuth()
+ ? AuthenticationType.SIMPLE
+ : AuthenticationType.KERBEROS;
+
+ } else if (parentContext != null) {
+ authenticationType =
+ parentContext instanceof SimpleUserContext
+ ? AuthenticationType.SIMPLE
+ : AuthenticationType.KERBEROS;
+ }
+
+ UserGroupInformation currentUser;
+ try {
+ currentUser = UserGroupInformation.getCurrentUser();
+ } catch (IOException ioException) {
+ throw new RuntimeException("Failed to get current user", ioException);
+ }
+
+ if (authenticationType == AuthenticationType.SIMPLE) {
+ UserGroupInformation userGroupInformation =
+ parentContext != null ? parentContext.getUser() : currentUser;
+ SimpleUserContext simpleUserContext =
+ new SimpleUserContext(userGroupInformation, enableUserImpersonation);
+ addUserContext(nameIdentifier, simpleUserContext);
+ return simpleUserContext;
+ } else if (authenticationType == AuthenticationType.KERBEROS) {
+ // If the Kerberos authentication is inherited from the parent context,
+ // use the parent context's Kerberos configuration.
+ if (authenticationConfig.isSimpleAuth()) {
+ KerberosUserContext kerberosUserContext = ((KerberosUserContext)
parentContext).deepCopy();
+
kerberosUserContext.setEnableUserImpersonation(enableUserImpersonation);
+ addUserContext(nameIdentifier, kerberosUserContext);
+ return kerberosUserContext;
+ }
+
+ String keytabPath =
+ String.format(
+ GRAVITINO_KEYTAB_FORMAT,
+ catalogInfo.id() + "-" + nameIdentifier.toString().replace(".",
"-"));
+ KerberosUserContext kerberosUserContext =
+ new KerberosUserContext(enableUserImpersonation, keytabPath);
+ kerberosUserContext.initKerberos(properties, configuration,
parentContext == null);
+ addUserContext(nameIdentifier, kerberosUserContext);
+ return kerberosUserContext;
+ } else {
+ throw new RuntimeException("Unsupported authentication type: " +
authenticationType);
+ }
+ }
+
+ abstract UserGroupInformation getUser();
+
+ abstract boolean enableUserImpersonation();
+
+ abstract UserGroupInformation createProxyUser();
+
+ public <T> T doAs(PrivilegedExceptionAction<T> action, NameIdentifier ident)
{
+ UserGroupInformation u = getUser();
+ if (enableUserImpersonation()) {
+ u = createProxyUser();
+ }
+
+ try {
+ return u.doAs(action);
+ } catch (IOException | InterruptedException ioe) {
+ throw new RuntimeException("Failed to operation on entity:" + ident,
ioe);
+ } catch (UndeclaredThrowableException e) {
+ Throwable innerException = e.getCause();
+ if (innerException instanceof PrivilegedActionException) {
+ throw new RuntimeException(innerException.getCause());
+ } else if (innerException instanceof InvocationTargetException) {
+ throw new RuntimeException(innerException.getCause());
+ } else {
+ throw new RuntimeException(innerException);
+ }
+ }
+ }
+}
diff --git
a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java
b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java
index 9ca01edd8..b6ade0f31 100644
---
a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java
+++
b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java
@@ -543,4 +543,104 @@ public class HadoopUserAuthenticationIT extends
AbstractIT {
"hadoop", "fs", "-chown", "-R", "cli_schema", "/user/hadoop/" +
catalogName);
Assertions.assertDoesNotThrow(() ->
catalog.asSchemas().dropSchema(SCHEMA_NAME, true));
}
+
+ @Test
+ void testUserImpersonation() {
+ KerberosTokenProvider provider =
+ KerberosTokenProvider.builder()
+ .withClientPrincipal(GRAVITINO_CLIENT_PRINCIPAL)
+ .withKeyTabFile(new File(TMP_DIR + GRAVITINO_CLIENT_KEYTAB))
+ .build();
+ adminClient =
GravitinoAdminClient.builder(serverUri).withKerberosAuth(provider).build();
+
+ String metalakeName = GravitinoITUtils.genRandomName("metalake");
+ String catalogName = GravitinoITUtils.genRandomName("catalog");
+ GravitinoMetalake gravitinoMetalake =
+ adminClient.createMetalake(metalakeName, null, ImmutableMap.of());
+
+ // Create a catalog
+ Map<String, String> properties = Maps.newHashMap();
+ String localtion = HDFS_URL + "/user/hadoop/" + catalogName;
+
+ properties.put(IMPERSONATION_ENABLE_KEY, "true");
+ properties.put(KEY_TAB_URI_KEY, TMP_DIR + HADOOP_CLIENT_KEYTAB);
+ properties.put(PRINCIPAL_KEY, HADOOP_CLIENT_PRINCIPAL);
+ properties.put(AUTH_TYPE_KEY, "kerberos");
+ properties.put("location", localtion);
+
+ kerberosHiveContainer.executeInContainer(
+ "hadoop", "fs", "-mkdir", "-p", "/user/hadoop/" + catalogName);
+
+ Catalog catalog =
+ gravitinoMetalake.createCatalog(
+ catalogName, Catalog.Type.FILESET, "hadoop", "comment",
properties);
+
+ Map<String, String> schemaProperty = new HashMap<>();
+
+ // Test set schema IMPERSONATION_ENABLE_KEY to true, the final result is:
+ // IMPERSONATION_ENABLE_KEY is true
+ // so the user access HDFS is user 'gravitino_client'
+ schemaProperty.put(IMPERSONATION_ENABLE_KEY, "true");
+ Exception exception =
+ Assertions.assertThrows(
+ Exception.class,
+ () -> catalog.asSchemas().createSchema(SCHEMA_NAME, "comment",
schemaProperty));
+ String exceptionMessage = Throwables.getStackTraceAsString(exception);
+ Assertions.assertTrue(
+ exceptionMessage.contains("Permission denied: user=gravitino_client,
access=WRITE"));
+
+ // Test set schema IMPERSONATION_ENABLE_KEY to false, the final result is:
+ // IMPERSONATION_ENABLE_KEY is false
+ // so the user access HDFS is user 'cli'
+ schemaProperty.put(IMPERSONATION_ENABLE_KEY, "false");
+ exception =
+ Assertions.assertThrows(
+ Exception.class,
+ () -> catalog.asSchemas().createSchema(SCHEMA_NAME, "comment",
schemaProperty));
+ exceptionMessage = Throwables.getStackTraceAsString(exception);
+ Assertions.assertTrue(exceptionMessage.contains("Permission denied:
user=cli, access=WRITE"));
+
+ kerberosHiveContainer.executeInContainer(
+ "hadoop", "fs", "-chown", "-R", "cli", "/user/hadoop/" + catalogName);
+ Assertions.assertDoesNotThrow(
+ () -> catalog.asSchemas().createSchema(SCHEMA_NAME, "comment",
schemaProperty));
+
+ String filesetName = GravitinoITUtils.genRandomName("fileset");
+ Map<String, String> filesetProperty = new HashMap<>();
+ filesetProperty.put(IMPERSONATION_ENABLE_KEY, "true");
+ exception =
+ Assertions.assertThrows(
+ Exception.class,
+ () ->
+ catalog
+ .asFilesetCatalog()
+ .createFileset(
+ NameIdentifier.of(SCHEMA_NAME, filesetName),
+ "comment",
+ Fileset.Type.MANAGED,
+ null,
+ filesetProperty));
+ exceptionMessage = Throwables.getStackTraceAsString(exception);
+ Assertions.assertTrue(
+ exceptionMessage.contains("Permission denied: user=gravitino_client,
access=WRITE"));
+
+ // Line 602 has set the owner of the schema directory to 'cli', if the
IMPERSONATION_ENABLE_KEY
+ // is false, the user is 'cli'
+ filesetProperty.put(IMPERSONATION_ENABLE_KEY, "false");
+ Assertions.assertDoesNotThrow(
+ () ->
+ catalog
+ .asFilesetCatalog()
+ .createFileset(
+ NameIdentifier.of(SCHEMA_NAME, filesetName),
+ "comment",
+ Fileset.Type.MANAGED,
+ null,
+ filesetProperty));
+
+ catalog.asFilesetCatalog().dropFileset(NameIdentifier.of(SCHEMA_NAME,
filesetName));
+ catalog.asSchemas().dropSchema(SCHEMA_NAME, true);
+ gravitinoMetalake.dropCatalog(catalogName);
+ adminClient.dropMetalake(metalakeName);
+ }
}
diff --git a/docs/gravitino-server-config.md b/docs/gravitino-server-config.md
index 6a8e96b8f..e6633f628 100644
--- a/docs/gravitino-server-config.md
+++ b/docs/gravitino-server-config.md
@@ -42,11 +42,16 @@ You can also specify filter parameters by setting
configuration entries of the f
### Storage configuration
+#### Storage backend configuration
+
+Currently, Gravitino only supports a JDBC database backend, and the default
implementation is the H2 database, as it's an embedded database with no external
dependencies, making it very suitable for local development or tests.
+If you use H2 in a production environment, Gravitino will not guarantee data
consistency and durability. It's highly recommended to use MySQL as the backend
database.
+
+The following table lists the storage configuration items:
+
| Configuration item | Description
| Default value
| Required | Since
version |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|--------------------------------------------------|---------------|
| `gravitino.entity.store` | Which entity storage
implementation to use. Only `relational` storage is currently supported.
| `relational`
| No | 0.1.0
|
-| `gravitino.entity.store.kv` | Detailed implementation
of KV storage. `RocksDB` storage is currently supported, and the implementation
is `RocksDBKvBackend`.
|
`RocksDBKvBackend` | No
| 0.1.0 |
-| `gravitino.entity.store.kv.rocksdbPath` | The storage path for
RocksDB storage implementation. It supports both absolute and relative path, if
the value is a relative path, the final path is
`${GRAVITINO_HOME}/${PATH_YOU_HAVA_SET}`, default value is
`${GRAVITINO_HOME}/data/rocksdb` | `${GRAVITINO_HOME}/data/rocksdb` | No
| 0.1.0 |
| `gravitino.entity.serde` | The
serialization/deserialization class used to support entity storage. `proto` is
currently supported.
| `proto` | No
| 0.1.0 |
| `gravitino.entity.store.maxTransactionSkewTimeMs` | The maximum skew time of
transactions in milliseconds.
| `2000`
| No | 0.3.0
|
| `gravitino.entity.store.kv.deleteAfterTimeMs` | It is deprecated since
Gravitino 0.5.0. Please use `gravitino.entity.store.deleteAfterTimeMs` instead.
| `604800000`(7
days) | No | 0.3.0
|
@@ -61,9 +66,13 @@ You can also specify filter parameters by setting
configuration entries of the f
:::caution
-We strongly recommend that you change the default value of
`gravitino.entity.store.kv.rocksdbPath`, as it's under the deployment directory
and future version upgrades may remove it.
+We strongly recommend that you change the default value of
`gravitino.entity.store.relational.storagePath`, as it's under the deployment
directory and future version upgrades may remove it.
:::
+#### Create JDBC backend schema and table
+
+For the H2 database, all tables needed by Gravitino are created automatically
when the Gravitino server starts up. For MySQL, you should first initialize the
database tables yourself by executing the DDL scripts in the
`${GRAVITINO_HOME}/scripts/mysql/` directory.
+
### Tree lock configuration
Gravitino server uses tree lock to ensure the consistency of the data. The
tree lock is a memory lock (Currently, Gravitino only supports in memory lock)
that can be used to ensure the consistency of the data in Gravitino server. The
configuration items are as follows:
diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md
index 1cfb95943..ca552091c 100644
--- a/docs/hadoop-catalog.md
+++ b/docs/hadoop-catalog.md
@@ -28,13 +28,25 @@ Besides the [common catalog
properties](./gravitino-server-config.md#gravitino-c
| Property Name | Description
|
Default Value | Required |
Since Version |
|----------------------------------------------------|------------------------------------------------------------------------------------------------|---------------|-------------------------------------------------------------|---------------|
| `location` | The storage location
managed by Hadoop catalog. |
(none) | No |
0.5.0 |
-| `authentication.type` | The type of
authentication for Hadoop catalog, currently we only support `kerberos`,
`simple`. | `simple` | No
| 0.5.1 |
| `authentication.impersonation-enable` | Whether to enable
impersonation for the Hadoop catalog. |
`false` | No |
0.5.1 |
+| `authentication.type` | The type of
authentication for Hadoop catalog, currently we only support `kerberos`,
`simple`. | `simple` | No
| 0.5.1 |
| `authentication.kerberos.principal` | The principal of the
Kerberos authentication |
(none) | required if the value of `authentication.type` is Kerberos. |
0.5.1 |
| `authentication.kerberos.keytab-uri`               | The URI of the keytab
for the Kerberos authentication. |
(none) | required if the value of `authentication.type` is Kerberos. |
0.5.1 |
| `authentication.kerberos.check-interval-sec` | The check interval of
Kerberos credential for Hadoop catalog. | 60
| No | 0.5.1
|
| `authentication.kerberos.keytab-fetch-timeout-sec` | The fetch timeout of
retrieving Kerberos keytab from `authentication.kerberos.keytab-uri`. | 60
| No | 0.5.1
|
+
+### Authentication for Hadoop Catalog
+
+The Hadoop catalog supports multi-level authentication to control access,
allowing different authentication settings for the catalog, schema, and
fileset. The priority of authentication settings is as follows: catalog <
schema < fileset. Specifically:
+
+- **Catalog**: The default authentication is `simple`.
+- **Schema**: Inherits the authentication setting from the catalog if not
explicitly set. For more information about schema settings, please refer to
[Schema properties](#schema-properties).
+- **Fileset**: Inherits the authentication setting from the schema if not
explicitly set. For more information about fileset settings, please refer to
[Fileset properties](#fileset-properties).
+
+The default value of `authentication.impersonation-enable` for catalogs is `false`. For
+schemas and filesets, the default value is inherited from the parent. A value
set by the user will override the parent value, and the priority mechanism is
the same as for authentication.
+
### Catalog operations
Refer to [Catalog
operations](./manage-fileset-metadata-using-gravitino.md#catalog-operations)
for more details.
@@ -47,9 +59,13 @@ The Hadoop catalog supports creating, updating, deleting,
and listing schema.
### Schema properties
-| Property name | Description | Default
value | Required | Since Version |
-|---------------|------------------------------------------------|---------------|----------|---------------|
-| `location` | The storage location managed by Hadoop schema. | (none)
| No | 0.5.0 |
+| Property name | Description
| Default value | Required | Since Version |
+|----------------------------------------------------|----------------------------------------------------------------------------------------------------------------|---------------------------|----------|-----------------|
+| `location` | The storage location
managed by Hadoop schema.
| (none) | No | 0.5.0 |
+| `authentication.impersonation-enable` | Whether to enable
impersonation for this schema of the Hadoop catalog.
| The parent(catalog) value | No | 0.6.0 |
+| `authentication.type` | The type of
authentication for this schema of the Hadoop catalog, currently we only support
`kerberos`, `simple`. | The parent(catalog) value | No | 0.6.0 |
+| `authentication.kerberos.principal` | The principal of the
Kerberos authentication for this schema.
| The parent(catalog) value | No | 0.6.0 |
+| `authentication.kerberos.keytab-uri`               | The URI of the keytab
for the Kerberos authentication for this schema.
| The parent(catalog) value | No | 0.6.0 |
### Schema operations
@@ -63,7 +79,12 @@ Refer to [Schema
operation](./manage-fileset-metadata-using-gravitino.md#schema-
### Fileset properties
-None.
+| Property name | Description
| Default value | Required | Since Version |
+|----------------------------------------------------|--------------------------------------------------------------------------------------------------------|--------------------------|----------|-----------------|
+| `authentication.impersonation-enable` | Whether to enable
impersonation for the Hadoop catalog fileset.
| The parent(schema) value | No | 0.6.0 |
+| `authentication.type` | The type of
authentication for Hadoop catalog fileset, currently we only support
`kerberos`, `simple`. | The parent(schema) value | No | 0.6.0 |
+| `authentication.kerberos.principal` | The principal of the
Kerberos authentication for the fileset.
| The parent(schema) value | No | 0.6.0 |
+| `authentication.kerberos.keytab-uri`               | The URI of the keytab
for the Kerberos authentication for the fileset.
| The parent(schema) value | No | 0.6.0 |
### Fileset operations
diff --git a/docs/lakehouse-iceberg-catalog.md
b/docs/lakehouse-iceberg-catalog.md
index 3560ba2b1..61e7c4fcf 100644
--- a/docs/lakehouse-iceberg-catalog.md
+++ b/docs/lakehouse-iceberg-catalog.md
@@ -29,6 +29,7 @@ Builds with Hadoop 2.10.x, there may be compatibility issues
when accessing Hado
- Works as a catalog proxy, supporting `HiveCatalog`, `JdbcCatalog` and
`RESTCatalog`.
- Supports DDL operations for Iceberg schemas and tables.
- Doesn't support snapshot or table management operations.
+- Supports Kerberos or simple authentication for the Iceberg catalog with a Hive
backend.
### Catalog properties