This is an automated email from the ASF dual-hosted git repository.

jshao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git


The following commit(s) were added to refs/heads/main by this push:
     new 1721497e9d [#8835][#8836] feat(lakehouseCatalog): supports catalog and 
schema operations for lakehouse catalog (#9130)
1721497e9d is described below

commit 1721497e9d206d45dde080a7bd5bc65d3e6b09d4
Author: Junda Yang <[email protected]>
AuthorDate: Mon Nov 17 13:17:08 2025 -0800

    [#8835][#8836] feat(lakehouseCatalog): supports catalog and schema 
operations for lakehouse catalog (#9130)
    
    ### What changes were proposed in this pull request?
    
    1. **Schema Operations**: Implemented full CRUD support
       - `createSchema`: Create new schemas with properties
       - `listSchemas`: List all schemas in the catalog
       - `loadSchema`: Load schema metadata
       - `alterSchema`: Modify schema properties
       - `dropSchema`: Delete schemas (with cascade option)
    
    2. **Managed Storage Framework**:
       - Added `location` property for catalog-level storage configuration
       - Added `location` property for schema-level storage configuration
         (overrides catalog-level)
       - Implemented path handling with trailing slash normalization
    
    3. **Dependencies**:
       - Added `hadoop3-client-api` dependency for filesystem operations
    
    
    ### Why are the changes needed?
    
    This PR builds upon the generic lakehouse catalog framework (#8833) by
    implementing the schema layer, which is essential for:
    
    1. **Foundation for Table Operations**: Schema management is a
    prerequisite for table operations (planned in future PRs)
    2. **Managed Metadata**: Stores schema metadata in Gravitino's entity
    store, enabling centralized management
    3. **Storage Flexibility**: Allows per-schema storage configuration,
    supporting multi-tenant and multi-storage scenarios
    4. **Catalog Completeness**: Moves the catalog from framework-only to
    functional for schema operations
    
    Without schema operations, users cannot organize their lakehouse data
    into logical namespaces, making the catalog incomplete for practical
    use.
    
    Fix: #8835 #8836
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, users can now:
    - Create generic lakehouse catalogs with `provider: "generic-lakehouse"`
    - Perform schema operations: create, list, load, alter, and drop schemas
    - Configure storage locations at catalog and schema levels using
    `location` property
    - Use various storage backends (local filesystem, HDFS, S3, GCS, ADLS)
    via `gravitino.bypass.*` properties
    
    **Note**: Table operations are not included in this PR and will throw
    `UnsupportedOperationException`. This is expected and will be
    implemented in subsequent PRs.
    
    ### How was this patch tested?
    
    Unit tests (added in `TestGenericLakehouseCatalogOperations`, covering
    schema create/load/list/drop and error cases)
    
    ---------
    
    Co-authored-by: mchades <[email protected]>
---
 .../catalog-generic-lakehouse/build.gradle.kts     |   1 +
 .../GenericLakehouseCatalogCapability.java         |  24 +--
 .../GenericLakehouseCatalogOperations.java         | 120 ++++++++---
 .../GenericLakehouseCatalogPropertiesMetadata.java |  26 ++-
 .../GenericLakehouseSchemaPropertiesMetadata.java  |  24 ++-
 .../TestGenericLakehouseCatalogOperations.java     | 232 +++++++++++++++++++++
 6 files changed, 371 insertions(+), 56 deletions(-)

diff --git a/catalogs/catalog-generic-lakehouse/build.gradle.kts 
b/catalogs/catalog-generic-lakehouse/build.gradle.kts
index c3ad842ac3..fceac14304 100644
--- a/catalogs/catalog-generic-lakehouse/build.gradle.kts
+++ b/catalogs/catalog-generic-lakehouse/build.gradle.kts
@@ -42,6 +42,7 @@ dependencies {
   implementation(libs.commons.io)
   implementation(libs.commons.lang3)
   implementation(libs.guava)
+  implementation(libs.hadoop3.client.api)
 
   annotationProcessor(libs.lombok)
 
diff --git 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
index 08015f7fce..412b82d6a7 100644
--- 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
+++ 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
@@ -24,27 +24,7 @@ import 
org.apache.gravitino.connector.capability.CapabilityResult;
 public class GenericLakehouseCatalogCapability implements Capability {
 
   @Override
-  public CapabilityResult columnNotNull() {
-    throw new UnsupportedOperationException(
-        "Not implemented yet: 
GenericLakehouseCatalogCapability.columnNotNull");
-  }
-
-  @Override
-  public CapabilityResult columnDefaultValue() {
-    throw new UnsupportedOperationException(
-        "Not implemented yet: 
GenericLakehouseCatalogCapability.columnDefaultValue");
-  }
-
-  @Override
-  public CapabilityResult caseSensitiveOnName(Scope scope) {
-    switch (scope) {
-      case SCHEMA:
-      case TABLE:
-      case COLUMN:
-        throw new UnsupportedOperationException(
-            "Not implemented yet: 
GenericLakehouseCatalogCapability.caseSensitiveOnName");
-      default:
-        return CapabilityResult.SUPPORTED;
-    }
+  public CapabilityResult managedStorage(Scope scope) {
+    return CapabilityResult.SUPPORTED;
   }
 }
diff --git 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
index 64743488a0..1ddb177f17 100644
--- 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
+++ 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
@@ -18,9 +18,18 @@
  */
 package org.apache.gravitino.catalog.lakehouse;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.Map;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.gravitino.Catalog;
+import org.apache.gravitino.EntityStore;
+import org.apache.gravitino.GravitinoEnv;
 import org.apache.gravitino.NameIdentifier;
 import org.apache.gravitino.Namespace;
+import org.apache.gravitino.Schema;
+import org.apache.gravitino.SchemaChange;
+import org.apache.gravitino.catalog.ManagedSchemaOperations;
 import org.apache.gravitino.connector.CatalogInfo;
 import org.apache.gravitino.connector.CatalogOperations;
 import org.apache.gravitino.connector.HasPropertyMetadata;
@@ -39,11 +48,24 @@ import 
org.apache.gravitino.rel.expressions.distributions.Distribution;
 import org.apache.gravitino.rel.expressions.sorts.SortOrder;
 import org.apache.gravitino.rel.expressions.transforms.Transform;
 import org.apache.gravitino.rel.indexes.Index;
+import org.apache.hadoop.fs.Path;
 
-/** Operations for interacting with a generic lakehouse catalog in Apache 
Gravitino. */
+/**
+ * Operations for interacting with a generic lakehouse catalog in Apache 
Gravitino.
+ *
+ * <p>This catalog provides a unified interface for managing lakehouse table 
formats. It is designed
+ * to be extensible and can support various table formats through a common 
interface.
+ */
 public class GenericLakehouseCatalogOperations
     implements CatalogOperations, SupportsSchemas, TableCatalog {
 
+  private static final String SLASH = "/";
+
+  private final ManagedSchemaOperations managedSchemaOps;
+
+  @SuppressWarnings("unused") // todo: remove this after implementing table 
operations
+  private Optional<Path> catalogLakehouseLocation;
+
   /**
    * Initializes the generic lakehouse catalog operations with the provided 
configuration.
    *
@@ -56,7 +78,30 @@ public class GenericLakehouseCatalogOperations
   public void initialize(
       Map<String, String> conf, CatalogInfo info, HasPropertyMetadata 
propertiesMetadata)
       throws RuntimeException {
-    // TODO: Implement initialization logic
+    String catalogLocation =
+        (String)
+            propertiesMetadata
+                .catalogPropertiesMetadata()
+                .getOrDefault(conf, 
GenericLakehouseCatalogPropertiesMetadata.LAKEHOUSE_LOCATION);
+    this.catalogLakehouseLocation =
+        StringUtils.isNotBlank(catalogLocation)
+            ? 
Optional.of(catalogLocation).map(this::ensureTrailingSlash).map(Path::new)
+            : Optional.empty();
+  }
+
+  public GenericLakehouseCatalogOperations() {
+    this(GravitinoEnv.getInstance().entityStore());
+  }
+
+  @VisibleForTesting
+  GenericLakehouseCatalogOperations(EntityStore store) {
+    this.managedSchemaOps =
+        new ManagedSchemaOperations() {
+          @Override
+          protected EntityStore store() {
+            return store;
+          }
+        };
   }
 
   @Override
@@ -65,54 +110,60 @@ public class GenericLakehouseCatalogOperations
   @Override
   public void testConnection(
       NameIdentifier catalogIdent,
-      org.apache.gravitino.Catalog.Type type,
+      Catalog.Type type,
       String provider,
       String comment,
-      Map<String, String> properties)
-      throws Exception {
-    throw new UnsupportedOperationException("Not implemented yet.");
+      Map<String, String> properties) {
+    // No-op for generic lakehouse catalog.
   }
 
   @Override
-  public org.apache.gravitino.NameIdentifier[] 
listSchemas(org.apache.gravitino.Namespace namespace)
-      throws NoSuchCatalogException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+  public NameIdentifier[] listSchemas(Namespace namespace) throws 
NoSuchCatalogException {
+    return managedSchemaOps.listSchemas(namespace);
   }
 
   @Override
-  public org.apache.gravitino.Schema createSchema(
-      org.apache.gravitino.NameIdentifier ident, String comment, Map<String, 
String> properties)
+  public Schema createSchema(NameIdentifier ident, String comment, Map<String, 
String> properties)
       throws NoSuchCatalogException, SchemaAlreadyExistsException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    return managedSchemaOps.createSchema(ident, comment, properties);
   }
 
   @Override
-  public org.apache.gravitino.Schema 
loadSchema(org.apache.gravitino.NameIdentifier ident)
-      throws NoSuchSchemaException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+  public Schema loadSchema(NameIdentifier ident) throws NoSuchSchemaException {
+    return managedSchemaOps.loadSchema(ident);
   }
 
   @Override
-  public org.apache.gravitino.Schema alterSchema(
-      org.apache.gravitino.NameIdentifier ident, 
org.apache.gravitino.SchemaChange... changes)
+  public Schema alterSchema(NameIdentifier ident, SchemaChange... changes)
       throws NoSuchSchemaException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    return managedSchemaOps.alterSchema(ident, changes);
   }
 
   @Override
-  public boolean dropSchema(org.apache.gravitino.NameIdentifier ident, boolean 
cascade)
-      throws NonEmptySchemaException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+  public boolean dropSchema(NameIdentifier ident, boolean cascade) throws 
NonEmptySchemaException {
+    return managedSchemaOps.dropSchema(ident, cascade);
   }
 
+  // ==================== Table Operations (In Development) 
====================
+  // TODO: Implement table operations in subsequent releases
+  // See: https://github.com/apache/gravitino/issues/8838
+  // The table operations will delegate to format-specific implementations
+  // (e.g., LanceCatalogOperations for Lance tables)
+
   @Override
   public NameIdentifier[] listTables(Namespace namespace) throws 
NoSuchSchemaException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    // TODO(#8838): Implement table listing
+    throw new UnsupportedOperationException(
+        "Table operations are not yet implemented. "
+            + "This feature is planned for a future release.");
   }
 
   @Override
   public Table loadTable(NameIdentifier ident) throws NoSuchTableException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    // TODO(#8838): Implement table loading
+    throw new UnsupportedOperationException(
+        "Table operations are not yet implemented. "
+            + "This feature is planned for a future release.");
   }
 
   @Override
@@ -126,17 +177,34 @@ public class GenericLakehouseCatalogOperations
       SortOrder[] sortOrders,
       Index[] indexes)
       throws NoSuchSchemaException, TableAlreadyExistsException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    // TODO(#8838): Implement table creation
+    // This should:
+    // 1. Determine table format from properties
+    // 2. Delegate to format-specific implementation (e.g., 
LanceCatalogOperations)
+    // 3. Store metadata in Gravitino entity store
+    throw new UnsupportedOperationException(
+        "Table operations are not yet implemented. "
+            + "This feature is planned for a future release.");
   }
 
   @Override
   public Table alterTable(NameIdentifier ident, TableChange... changes)
       throws NoSuchTableException, IllegalArgumentException {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    // TODO(#8838): Implement table alteration
+    throw new UnsupportedOperationException(
+        "Table operations are not yet implemented. "
+            + "This feature is planned for a future release.");
   }
 
   @Override
   public boolean dropTable(NameIdentifier ident) {
-    throw new UnsupportedOperationException("Not implemented yet.");
+    // TODO(#8838): Implement table dropping
+    throw new UnsupportedOperationException(
+        "Table operations are not yet implemented. "
+            + "This feature is planned for a future release.");
+  }
+
+  private String ensureTrailingSlash(String path) {
+    return path.endsWith(SLASH) ? path : path + SLASH;
   }
 }
diff --git 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
index 18543bd0a3..4877634f67 100644
--- 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
+++ 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
@@ -19,18 +19,36 @@
 
 package org.apache.gravitino.catalog.lakehouse;
 
-import com.google.common.collect.ImmutableMap;
+import static 
org.apache.gravitino.connector.PropertyEntry.stringOptionalPropertyEntry;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import java.util.List;
 import java.util.Map;
 import org.apache.gravitino.connector.BaseCatalogPropertiesMetadata;
 import org.apache.gravitino.connector.PropertyEntry;
 
 public class GenericLakehouseCatalogPropertiesMetadata extends 
BaseCatalogPropertiesMetadata {
 
-  private static final Map<String, PropertyEntry<?>> 
GENERIC_LAKEHOUSE_CATALOG_PROPERTY_ENTRIES =
-      ImmutableMap.<String, PropertyEntry<?>>builder().build();
+  public static final String LAKEHOUSE_LOCATION = "location";
+
+  private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA;
+
+  static {
+    List<PropertyEntry<?>> propertyEntries =
+        ImmutableList.of(
+            stringOptionalPropertyEntry(
+                LAKEHOUSE_LOCATION,
+                "The root directory of the lakehouse catalog.",
+                false /* immutable */,
+                null, /* defaultValue */
+                false /* hidden */));
+
+    PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries, 
PropertyEntry::getName);
+  }
 
   @Override
   protected Map<String, PropertyEntry<?>> specificPropertyEntries() {
-    return GENERIC_LAKEHOUSE_CATALOG_PROPERTY_ENTRIES;
+    return PROPERTIES_METADATA;
   }
 }
diff --git 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
index 05da8443cd..e3f6653f78 100644
--- 
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
+++ 
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
@@ -18,20 +18,36 @@
  */
 package org.apache.gravitino.catalog.lakehouse;
 
-import com.google.common.collect.ImmutableMap;
+import static 
org.apache.gravitino.connector.PropertyEntry.stringOptionalPropertyEntry;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import java.util.List;
 import java.util.Map;
 import org.apache.gravitino.connector.BasePropertiesMetadata;
 import org.apache.gravitino.connector.PropertyEntry;
 
 public class GenericLakehouseSchemaPropertiesMetadata extends 
BasePropertiesMetadata {
-  private static final Map<String, PropertyEntry<?>> propertiesMetadata;
+  public static final String LAKEHOUSE_LOCATION =
+      GenericLakehouseCatalogPropertiesMetadata.LAKEHOUSE_LOCATION;
+
+  private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA;
 
   static {
-    propertiesMetadata = ImmutableMap.of();
+    List<PropertyEntry<?>> propertyEntries =
+        ImmutableList.of(
+            stringOptionalPropertyEntry(
+                LAKEHOUSE_LOCATION,
+                "The root directory of the lakehouse schema.",
+                false /* immutable */,
+                null, /* defaultValue */
+                false /* hidden */));
+
+    PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries, 
PropertyEntry::getName);
   }
 
   @Override
   protected Map<String, PropertyEntry<?>> specificPropertyEntries() {
-    return propertiesMetadata;
+    return PROPERTIES_METADATA;
   }
 }
diff --git 
a/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
 
b/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
new file mode 100644
index 0000000000..67887c2f7a
--- /dev/null
+++ 
b/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.catalog.lakehouse;
+
+import static org.apache.gravitino.Configs.DEFAULT_ENTITY_RELATIONAL_STORE;
+import static 
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER;
+import static 
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_MAX_CONNECTIONS;
+import static 
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_PATH;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_URL;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_USER;
+import static 
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_WAIT_MILLISECONDS;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_STORE;
+import static org.apache.gravitino.Configs.ENTITY_STORE;
+import static org.apache.gravitino.Configs.RELATIONAL_ENTITY_STORE;
+import static org.apache.gravitino.Configs.STORE_DELETE_AFTER_TIME;
+import static org.apache.gravitino.Configs.STORE_TRANSACTION_MAX_SKEW_TIME;
+import static org.apache.gravitino.Configs.VERSION_RETENTION_COUNT;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FileUtils;
+import org.apache.gravitino.Catalog;
+import org.apache.gravitino.Config;
+import org.apache.gravitino.Configs;
+import org.apache.gravitino.EntityStore;
+import org.apache.gravitino.EntityStoreFactory;
+import org.apache.gravitino.NameIdentifier;
+import org.apache.gravitino.Namespace;
+import org.apache.gravitino.Schema;
+import org.apache.gravitino.StringIdentifier;
+import org.apache.gravitino.exceptions.NoSuchCatalogException;
+import org.apache.gravitino.exceptions.NoSuchSchemaException;
+import org.apache.gravitino.exceptions.SchemaAlreadyExistsException;
+import org.apache.gravitino.meta.AuditInfo;
+import org.apache.gravitino.meta.BaseMetalake;
+import org.apache.gravitino.meta.CatalogEntity;
+import org.apache.gravitino.meta.SchemaVersion;
+import org.apache.gravitino.storage.IdGenerator;
+import org.apache.gravitino.storage.RandomIdGenerator;
+import org.apache.gravitino.utils.NameIdentifierUtil;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+public class TestGenericLakehouseCatalogOperations {
+  private static final String STORE_PATH =
+      "/tmp/gravitino_test_entityStore_" + 
UUID.randomUUID().toString().replace("-", "");
+  private static final String METALAKE_NAME = "metalake_for_lakehouse_test";
+  private static final String CATALOG_NAME = "lakehouse_catalog_test";
+
+  private static EntityStore store;
+  private static IdGenerator idGenerator;
+  private static GenericLakehouseCatalogOperations ops;
+
+  @BeforeAll
+  public static void setUp() throws IOException {
+    Config config = Mockito.mock(Config.class);
+    when(config.get(ENTITY_STORE)).thenReturn(RELATIONAL_ENTITY_STORE);
+    
when(config.get(ENTITY_RELATIONAL_STORE)).thenReturn(DEFAULT_ENTITY_RELATIONAL_STORE);
+    
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PATH)).thenReturn(STORE_PATH);
+
+    // The following properties are used to create the JDBC connection; they 
are just for test, in
+    // the real world, they will be set automatically by the configuration 
file if you set
+    // ENTITY_RELATIONAL_STORE as EMBEDDED_ENTITY_RELATIONAL_STORE.
+    when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL))
+        .thenReturn(String.format("jdbc:h2:%s;DB_CLOSE_DELAY=-1;MODE=MYSQL", 
STORE_PATH));
+    
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_USER)).thenReturn("gravitino");
+    
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD)).thenReturn("gravitino");
+    
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER)).thenReturn("org.h2.Driver");
+    
Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_MAX_CONNECTIONS)).thenReturn(100);
+    
Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_WAIT_MILLISECONDS)).thenReturn(1000L);
+
+    File f = FileUtils.getFile(STORE_PATH);
+    f.deleteOnExit();
+
+    when(config.get(VERSION_RETENTION_COUNT)).thenReturn(1L);
+    when(config.get(STORE_TRANSACTION_MAX_SKEW_TIME)).thenReturn(1000L);
+    when(config.get(STORE_DELETE_AFTER_TIME)).thenReturn(20 * 60 * 1000L);
+    Mockito.when(config.get(Configs.CACHE_ENABLED)).thenReturn(false);
+
+    store = EntityStoreFactory.createEntityStore(config);
+    store.initialize(config);
+    idGenerator = RandomIdGenerator.INSTANCE;
+
+    // Create the metalake and catalog
+    AuditInfo auditInfo =
+        
AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build();
+    BaseMetalake metalake =
+        BaseMetalake.builder()
+            .withId(idGenerator.nextId())
+            .withName(METALAKE_NAME)
+            .withVersion(SchemaVersion.V_0_1)
+            .withAuditInfo(auditInfo)
+            .withName(METALAKE_NAME)
+            .build();
+    store.put(metalake, false);
+
+    CatalogEntity catalog =
+        CatalogEntity.builder()
+            .withId(idGenerator.nextId())
+            .withName(CATALOG_NAME)
+            .withNamespace(Namespace.of(METALAKE_NAME))
+            .withProvider("generic-lakehouse")
+            .withType(Catalog.Type.RELATIONAL)
+            .withAuditInfo(auditInfo)
+            .build();
+    store.put(catalog, false);
+
+    ops = new GenericLakehouseCatalogOperations(store);
+  }
+
+  @AfterAll
+  public static void tearDown() throws IOException {
+    ops.close();
+    store.close();
+    FileUtils.deleteDirectory(new File(STORE_PATH));
+  }
+
+  @Test
+  public void testSchemaOperations() {
+    String schemaName = randomSchemaName();
+    NameIdentifier schemaIdent =
+        NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, schemaName);
+    StringIdentifier stringId = StringIdentifier.fromId(idGenerator.nextId());
+    Map<String, String> properties = 
StringIdentifier.newPropertiesWithId(stringId, null);
+
+    ops.createSchema(schemaIdent, "schema comment", properties);
+    Schema loadedSchema = ops.loadSchema(schemaIdent);
+
+    Assertions.assertEquals(schemaName, loadedSchema.name());
+    Assertions.assertEquals("schema comment", loadedSchema.comment());
+    Assertions.assertEquals(properties, loadedSchema.properties());
+
+    // Test create schema with the same name
+    Assertions.assertThrows(
+        SchemaAlreadyExistsException.class,
+        () -> ops.createSchema(schemaIdent, "schema comment", properties));
+
+    // Test create schema in a non-existent catalog
+    Assertions.assertThrows(
+        NoSuchCatalogException.class,
+        () ->
+            ops.createSchema(
+                NameIdentifierUtil.ofSchema(METALAKE_NAME, 
"non-existent-catalog", schemaName),
+                "schema comment",
+                properties));
+
+    // Test load a non-existent schema
+    Assertions.assertThrows(
+        NoSuchSchemaException.class,
+        () ->
+            ops.loadSchema(
+                NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, 
"non-existent-schema")));
+
+    // Test load a non-existent schema in a non-existent catalog
+    Assertions.assertThrows(
+        NoSuchSchemaException.class,
+        () ->
+            ops.loadSchema(
+                NameIdentifierUtil.ofSchema(
+                    METALAKE_NAME, "non-existent-catalog", 
"non-existent-schema")));
+
+    // Create another schema
+    String schemaName2 = randomSchemaName();
+    NameIdentifier schemaIdent2 =
+        NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, schemaName2);
+    StringIdentifier stringId2 = StringIdentifier.fromId(idGenerator.nextId());
+    Map<String, String> properties2 = 
StringIdentifier.newPropertiesWithId(stringId2, null);
+
+    ops.createSchema(schemaIdent2, "schema comment 2", properties2);
+
+    // Test list schemas
+    NameIdentifier[] idents = ops.listSchemas(Namespace.of(METALAKE_NAME, 
CATALOG_NAME));
+
+    Set<NameIdentifier> resultSet = 
Arrays.stream(idents).collect(Collectors.toSet());
+    Assertions.assertTrue(resultSet.contains(schemaIdent));
+    Assertions.assertTrue(resultSet.contains(schemaIdent2));
+
+    // Test list schemas in a non-existent catalog
+    Assertions.assertThrows(
+        NoSuchCatalogException.class,
+        () -> ops.listSchemas(Namespace.of(METALAKE_NAME, 
"non-existent-catalog")));
+
+    // Test drop schema
+    Assertions.assertTrue(ops.dropSchema(schemaIdent, false));
+    Assertions.assertFalse(ops.dropSchema(schemaIdent, false));
+    Assertions.assertTrue(ops.dropSchema(schemaIdent2, false));
+    Assertions.assertFalse(ops.dropSchema(schemaIdent2, false));
+
+    // Test drop non-existent schema
+    Assertions.assertFalse(
+        ops.dropSchema(
+            NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, 
"non-existent-schema"),
+            false));
+
+    // Test drop schema in a non-existent catalog
+    Assertions.assertFalse(
+        ops.dropSchema(
+            NameIdentifierUtil.ofSchema(METALAKE_NAME, "non-existent-catalog", 
schemaName2),
+            false));
+  }
+
+  private String randomSchemaName() {
+    return "schema_" + UUID.randomUUID().toString().replace("-", "");
+  }
+}

Reply via email to