dimas-b commented on code in PR #1287:
URL: https://github.com/apache/polaris/pull/1287#discussion_r2040385651


##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcMetaStoreManagerFactory.java:
##########
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Supplier;
+import javax.sql.DataSource;
+import org.apache.polaris.core.PolarisCallContext;
+import org.apache.polaris.core.PolarisDefaultDiagServiceImpl;
+import org.apache.polaris.core.PolarisDiagnostics;
+import org.apache.polaris.core.context.CallContext;
+import org.apache.polaris.core.context.RealmContext;
+import org.apache.polaris.core.entity.*;
+import org.apache.polaris.core.persistence.*;
+import org.apache.polaris.core.persistence.bootstrap.RootCredentialsSet;
+import org.apache.polaris.core.persistence.cache.EntityCache;
+import org.apache.polaris.core.persistence.dao.entity.BaseResult;
+import org.apache.polaris.core.persistence.dao.entity.EntityResult;
+import org.apache.polaris.core.persistence.dao.entity.PrincipalSecretsResult;
+import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider;
+import org.apache.polaris.core.storage.cache.StorageCredentialCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The implementation of Configuration interface for configuring the {@link 
PolarisMetaStoreManager}
+ * using a JDBC backed by SQL metastore.
+ */
+public class JdbcMetaStoreManagerFactory implements MetaStoreManagerFactory {

Review Comment:
   Why not extend `LocalPolarisMetaStoreManagerFactory`?



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcMetaStoreManagerFactory.java:
##########
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Supplier;
+import javax.sql.DataSource;
+import org.apache.polaris.core.PolarisCallContext;
+import org.apache.polaris.core.PolarisDefaultDiagServiceImpl;
+import org.apache.polaris.core.PolarisDiagnostics;
+import org.apache.polaris.core.context.CallContext;
+import org.apache.polaris.core.context.RealmContext;
+import org.apache.polaris.core.entity.*;
+import org.apache.polaris.core.persistence.*;
+import org.apache.polaris.core.persistence.bootstrap.RootCredentialsSet;
+import org.apache.polaris.core.persistence.cache.EntityCache;
+import org.apache.polaris.core.persistence.dao.entity.BaseResult;
+import org.apache.polaris.core.persistence.dao.entity.EntityResult;
+import org.apache.polaris.core.persistence.dao.entity.PrincipalSecretsResult;
+import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider;
+import org.apache.polaris.core.storage.cache.StorageCredentialCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The implementation of Configuration interface for configuring the {@link 
PolarisMetaStoreManager}
+ * using a JDBC backed by SQL metastore.
+ */
+public class JdbcMetaStoreManagerFactory implements MetaStoreManagerFactory {
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(JdbcMetaStoreManagerFactory.class);
+  final Map<String, PolarisMetaStoreManager> metaStoreManagerMap = new 
HashMap<>();
+  final Map<String, StorageCredentialCache> storageCredentialCacheMap = new 
HashMap<>();
+  final Map<String, EntityCache> entityCacheMap = new HashMap<>();
+  final Map<String, Supplier<BasePersistence>> sessionSupplierMap = new 
HashMap<>();
+  protected final PolarisDiagnostics diagServices = new 
PolarisDefaultDiagServiceImpl();
+
+  DataSource ds;
+  private final PolarisDiagnostics diagnostics;
+  PolarisStorageIntegrationProvider storageIntegrationProvider;
+
+  protected JdbcMetaStoreManagerFactory(@Nonnull PolarisDiagnostics 
diagnostics) {
+    this.diagnostics = diagnostics;
+  }
+
+  /**
+   * Subclasses can override this to inject different implementations of 
PolarisMetaStoreManager
+   * into the existing realm-based setup flow.
+   */
+  protected PolarisMetaStoreManager createNewMetaStoreManager() {
+    return new AtomicOperationMetaStoreManager();
+  }
+
+  private void initializeForRealm(
+      RealmContext realmContext, RootCredentialsSet rootCredentialsSet) {
+    DatasourceOperations databaseOperations = new DatasourceOperations(ds);
+    // TODO: see if we need to take script from Quarkus or can we just
+    // use the script committed repo.
+    
databaseOperations.executeScript("scripts/postgres/schema-v1-postgres.sql");
+    sessionSupplierMap.put(
+        realmContext.getRealmIdentifier(),
+        () ->
+            new JdbcBasePersistenceImpl(
+                databaseOperations,
+                secretsGenerator(realmContext, rootCredentialsSet),
+                storageIntegrationProvider));

Review Comment:
   It looks like all realms share the same `DataSource` and schema, but the 
realm ID is not part of primary keys... I guess this will cause data from 
different realms to mix up, won't it?



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/DatasourceOperations.java:
##########
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Objects;
+import javax.sql.DataSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DatasourceOperations {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(DatasourceOperations.class);
+
+  /** Already exists error * */
+  private static final String ALREADY_EXISTS_SQL_CODE = "42P07";
+
+  /** Integrity constraint * */
+  private static final String CONSTRAINT_VIOLATION_SQL_CODE = "23505";
+
+  private final DataSource datasource;
+
+  public DatasourceOperations(DataSource datasource) {
+    this.datasource = datasource;
+  }
+
+  public void executeScript(String scriptFilePath) {
+    ClassLoader classLoader = DatasourceOperations.class.getClassLoader();
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement()) {
+      BufferedReader reader =
+          new BufferedReader(
+              new InputStreamReader(
+                  
Objects.requireNonNull(classLoader.getResourceAsStream(scriptFilePath)), 
UTF_8));
+      StringBuilder sqlBuffer = new StringBuilder();
+      String line;
+      while ((line = reader.readLine()) != null) {
+        line = line.trim();
+        if (!line.isEmpty() && !line.startsWith("--")) { // Ignore empty lines 
and comments
+          sqlBuffer.append(line).append("\n");
+          if (line.endsWith(";")) { // Execute statement when semicolon is 
found
+            String sql = sqlBuffer.toString().trim();
+            try {
+              int rowsUpdated = statement.executeUpdate(sql);
+              LOGGER.debug("Query {} executed {} rows affected", sql, 
rowsUpdated);
+            } catch (SQLException e) {
+              LOGGER.error("Error executing query {}", sql, e);
+              // re:throw this as unhandled exception
+              throw new RuntimeException(e);
+            }
+            sqlBuffer.setLength(0); // Clear the buffer for the next statement
+          }
+        }
+      }
+    } catch (IOException e) {
+      LOGGER.error("Error reading the script file", e);
+      throw new RuntimeException(e);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing the script file", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  public <T> List<T> executeSelect(String query, Class<T> targetClass) {
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement();
+        ResultSet s = statement.executeQuery(query)) {
+      List<T> results = ResultSetToObjectConverter.convert(s, targetClass);
+      return results.isEmpty() ? null : results;
+    } catch (Exception e) {
+      LOGGER.error("Error executing query {}", query, e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  public int executeUpdate(String query) {
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement()) {
+      return statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing query {}", query, e);
+      return handleException(e);
+    }
+  }
+
+  public int executeUpdate(String query, Statement statement) {
+    LOGGER.debug("Executing query {} within transaction", query);
+    try {
+      return statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing query {}", query, e);
+      return handleException(e);
+    }
+  }
+
+  public void runWithinTransaction(TransactionCallback callback) {
+    Connection connection = null;
+    try {
+      connection = borrowConnection();
+      connection.setAutoCommit(false); // Disable auto-commit to start a 
transaction
+
+      boolean result;
+      try (Statement statement = connection.createStatement()) {
+        result = callback.execute(statement);
+      }
+
+      if (result) {
+        connection.commit(); // Commit the transaction if successful
+      } else {
+        connection.rollback(); // Rollback the transaction if not successful
+      }
+
+    } catch (SQLException e) {

Review Comment:
   What about other `RuntimeException`s?



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcBasePersistenceImpl.java:
##########
@@ -0,0 +1,619 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import org.apache.polaris.core.PolarisCallContext;
+import org.apache.polaris.core.entity.EntityNameLookupRecord;
+import org.apache.polaris.core.entity.PolarisBaseEntity;
+import org.apache.polaris.core.entity.PolarisChangeTrackingVersions;
+import org.apache.polaris.core.entity.PolarisEntityCore;
+import org.apache.polaris.core.entity.PolarisEntityId;
+import org.apache.polaris.core.entity.PolarisEntityType;
+import org.apache.polaris.core.entity.PolarisGrantRecord;
+import org.apache.polaris.core.entity.PolarisPrincipalSecrets;
+import org.apache.polaris.core.persistence.BaseMetaStoreManager;
+import org.apache.polaris.core.persistence.BasePersistence;
+import org.apache.polaris.core.persistence.EntityAlreadyExistsException;
+import org.apache.polaris.core.persistence.IntegrationPersistence;
+import org.apache.polaris.core.persistence.PrincipalSecretsGenerator;
+import org.apache.polaris.core.persistence.RetryOnConcurrencyException;
+import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo;
+import org.apache.polaris.core.storage.PolarisStorageIntegration;
+import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelEntity;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelGrantRecord;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelPrincipalAuthenticationData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class JdbcBasePersistenceImpl implements BasePersistence, 
IntegrationPersistence {
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(JdbcBasePersistenceImpl.class);
+
+  private final DatasourceOperations datasourceOperations;
+  private final PrincipalSecretsGenerator secretsGenerator;
+  private final PolarisStorageIntegrationProvider storageIntegrationProvider;
+
+  public JdbcBasePersistenceImpl(
+      DatasourceOperations databaseOperations,
+      PrincipalSecretsGenerator secretsGenerator,
+      PolarisStorageIntegrationProvider storageIntegrationProvider) {
+    this.datasourceOperations = databaseOperations;
+    this.secretsGenerator = secretsGenerator;
+    this.storageIntegrationProvider = storageIntegrationProvider;
+  }
+
+  @Override
+  public long generateNewId(@Nonnull PolarisCallContext callCtx) {
+    return IdGenerator.idGenerator.nextId();
+  }
+
+  @Override
+  public void writeEntity(
+      @Nonnull PolarisCallContext callCtx,
+      @Nonnull PolarisBaseEntity entity,
+      boolean nameOrParentChanged,
+      PolarisBaseEntity originalEntity) {
+    ModelEntity modelEntity = ModelEntity.fromEntity(entity);
+    String query;
+    if (originalEntity == null) {
+      query = JdbcCrudQueryGenerator.generateInsertQuery(modelEntity, 
ModelEntity.class);
+    } else {
+      Map<String, Object> params = new HashMap<>();
+      params.put("id", originalEntity.getId());
+      params.put("catalog_id", originalEntity.getCatalogId());
+      params.put("entity_version", originalEntity.getEntityVersion());
+      query = JdbcCrudQueryGenerator.generateUpdateQuery(modelEntity, params, 
ModelEntity.class);
+    }
+    int rowsUpdated = datasourceOperations.executeUpdate(query);
+    if (rowsUpdated == -1 && originalEntity == null) {
+      // constraint validation.
+      throw new EntityAlreadyExistsException(entity);
+    } else if (rowsUpdated == 0 && originalEntity != null) {
+      // concurrently row got updated, as version mismatched.
+      throw new RetryOnConcurrencyException("CAS failed");
+    }
+  }
+
+  @Override
+  public void writeEntities(
+      @Nonnull PolarisCallContext callCtx,
+      @Nonnull List<PolarisBaseEntity> entities,
+      List<PolarisBaseEntity> originalEntities) {
+    try {
+      datasourceOperations.runWithinTransaction(
+          statement -> {

Review Comment:
   What's the purpose of the `statement` parameter? It does not appear to 
be used.



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcCrudQueryGenerator.java:
##########
@@ -0,0 +1,241 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelGrantRecord;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelPrincipalAuthenticationData;
+
+public class JdbcCrudQueryGenerator {
+
+  private static final Pattern CAMEL_CASE_PATTERN =
+      Pattern.compile("(?<=[a-z0-9])[A-Z]|(?<=[A-Z])[A-Z](?=[a-z])");
+
+  public static String generateSelectQuery(
+      Class<?> entityClass, String filter, Integer limit, Integer offset, 
String orderBy) {
+    String tableName = getTableName(entityClass);
+    List<String> fields = new ArrayList<>();
+
+    for (Field field : entityClass.getDeclaredFields()) {
+      fields.add(camelToSnake(field.getName()));
+    }
+
+    String columns = String.join(", ", fields);
+    StringBuilder query =
+        new StringBuilder("SELECT ").append(columns).append(" FROM 
").append(tableName);
+    if (filter != null && !filter.isEmpty()) {
+      query.append(" WHERE ").append(String.join(" AND ", filter));
+    }
+    return query.toString();
+  }
+
+  public static String generateSelectQuery(
+      Class<?> entityClass,
+      Map<String, Object> whereClause,
+      Integer limit,
+      Integer offset,
+      String orderBy) {
+    String tableName = getTableName(entityClass);
+    List<String> fields = new ArrayList<>();
+
+    for (Field field : entityClass.getDeclaredFields()) {
+      fields.add(camelToSnake(field.getName()));
+    }
+
+    String columns = String.join(", ", fields);
+    StringBuilder query =
+        new StringBuilder("SELECT ").append(columns).append(" FROM 
").append(tableName);
+
+    if (whereClause != null && !whereClause.isEmpty()) {
+      query.append(generateWhereClause(whereClause));
+    }
+
+    if (orderBy != null && !orderBy.isEmpty()) {
+      query.append(" ORDER BY ").append(orderBy);
+    }
+
+    if (limit != null) {
+      query.append(" LIMIT ").append(limit);
+    }
+
+    if (offset != null && limit != null) { // Offset only makes sense with 
limit.
+      query.append(" OFFSET ").append(offset);
+    }
+
+    return query.toString();
+  }
+
+  public static String generateInsertQuery(Object object, Class<?> 
entityClass) {
+    String tableName = getTableName(entityClass);
+    if (object == null || tableName.isEmpty()) {
+      return null; // Or throw an exception
+    }
+
+    Class<?> objectClass = object.getClass();
+    Field[] fields = objectClass.getDeclaredFields();
+    List<String> columnNames = new ArrayList<>();
+    List<String> values = new ArrayList<>();
+
+    for (Field field : fields) {
+      field.setAccessible(true); // Allow access to private fields
+      try {
+        Object value = field.get(object);
+        if (value != null) { // Only include non-null fields
+          columnNames.add(camelToSnake(field.getName()));

Review Comment:
   Would it not be preferable to delegate this to `ModelEntity` (or other value 
classes)? We have a hard dependency between classes and column names anyway, 
but proper delegation will avoid obscure reflection calls and make all 
dependencies explicit.



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcBasePersistenceImpl.java:
##########
@@ -0,0 +1,619 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import org.apache.polaris.core.PolarisCallContext;
+import org.apache.polaris.core.entity.EntityNameLookupRecord;
+import org.apache.polaris.core.entity.PolarisBaseEntity;
+import org.apache.polaris.core.entity.PolarisChangeTrackingVersions;
+import org.apache.polaris.core.entity.PolarisEntityCore;
+import org.apache.polaris.core.entity.PolarisEntityId;
+import org.apache.polaris.core.entity.PolarisEntityType;
+import org.apache.polaris.core.entity.PolarisGrantRecord;
+import org.apache.polaris.core.entity.PolarisPrincipalSecrets;
+import org.apache.polaris.core.persistence.BaseMetaStoreManager;
+import org.apache.polaris.core.persistence.BasePersistence;
+import org.apache.polaris.core.persistence.EntityAlreadyExistsException;
+import org.apache.polaris.core.persistence.IntegrationPersistence;
+import org.apache.polaris.core.persistence.PrincipalSecretsGenerator;
+import org.apache.polaris.core.persistence.RetryOnConcurrencyException;
+import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo;
+import org.apache.polaris.core.storage.PolarisStorageIntegration;
+import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelEntity;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelGrantRecord;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelPrincipalAuthenticationData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class JdbcBasePersistenceImpl implements BasePersistence, 
IntegrationPersistence {
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(JdbcBasePersistenceImpl.class);
+
+  private final DatasourceOperations datasourceOperations;
+  private final PrincipalSecretsGenerator secretsGenerator;
+  private final PolarisStorageIntegrationProvider storageIntegrationProvider;
+
+  public JdbcBasePersistenceImpl(
+      DatasourceOperations databaseOperations,
+      PrincipalSecretsGenerator secretsGenerator,
+      PolarisStorageIntegrationProvider storageIntegrationProvider) {
+    this.datasourceOperations = databaseOperations;
+    this.secretsGenerator = secretsGenerator;
+    this.storageIntegrationProvider = storageIntegrationProvider;
+  }
+
+  @Override
+  public long generateNewId(@Nonnull PolarisCallContext callCtx) {
+    return IdGenerator.idGenerator.nextId();
+  }
+
+  @Override
+  public void writeEntity(
+      @Nonnull PolarisCallContext callCtx,
+      @Nonnull PolarisBaseEntity entity,
+      boolean nameOrParentChanged,
+      PolarisBaseEntity originalEntity) {
+    ModelEntity modelEntity = ModelEntity.fromEntity(entity);
+    String query;
+    if (originalEntity == null) {
+      query = JdbcCrudQueryGenerator.generateInsertQuery(modelEntity, 
ModelEntity.class);
+    } else {
+      Map<String, Object> params = new HashMap<>();
+      params.put("id", originalEntity.getId());
+      params.put("catalog_id", originalEntity.getCatalogId());
+      params.put("entity_version", originalEntity.getEntityVersion());
+      query = JdbcCrudQueryGenerator.generateUpdateQuery(modelEntity, params, 
ModelEntity.class);
+    }
+    int rowsUpdated = datasourceOperations.executeUpdate(query);
+    if (rowsUpdated == -1 && originalEntity == null) {
+      // constraint validation.
+      throw new EntityAlreadyExistsException(entity);
+    } else if (rowsUpdated == 0 && originalEntity != null) {
+      // concurrently row got updated, as version mismatched.
+      throw new RetryOnConcurrencyException("CAS failed");
+    }
+  }
+
+  @Override
+  public void writeEntities(
+      @Nonnull PolarisCallContext callCtx,
+      @Nonnull List<PolarisBaseEntity> entities,
+      List<PolarisBaseEntity> originalEntities) {
+    try {
+      datasourceOperations.runWithinTransaction(
+          statement -> {
+            for (int i = 0; i < entities.size(); i++) {
+              PolarisBaseEntity entity = entities.get(i);
+              ModelEntity modelEntity = ModelEntity.fromEntity(entity);
+
+              // first, check if the entity has already been created, in which 
case we will simply
+              // return it.
+              PolarisBaseEntity entityFound =
+                  lookupEntity(
+                      callCtx, entity.getCatalogId(), entity.getId(), 
entity.getTypeCode());
+              if (entityFound != null) {
+                // probably the client retried, simply return it
+                // TODO: Check correctness of returning entityFound vs entity 
here. It may have
+                // already
+                // been updated after the creation.
+                continue;
+              }
+              // lookup by name
+              EntityNameLookupRecord exists =
+                  lookupEntityIdAndSubTypeByName(
+                      callCtx,
+                      entity.getCatalogId(),
+                      entity.getParentId(),
+                      entity.getTypeCode(),
+                      entity.getName());
+              if (exists != null) {
+                throw new EntityAlreadyExistsException(entity);
+              }
+              String query;
+              if (originalEntities == null || originalEntities.get(i) == null) 
{
+                query = 
JdbcCrudQueryGenerator.generateInsertQuery(modelEntity, ModelEntity.class);
+              } else {
+                // CAS
+                Map<String, Object> params = new HashMap<>();
+                params.put("id", originalEntities.get(i).getId());
+                params.put("catalog_id", 
originalEntities.get(i).getCatalogId());
+                params.put("entity_version", 
originalEntities.get(i).getEntityVersion());
+                query =
+                    JdbcCrudQueryGenerator.generateUpdateQuery(
+                        modelEntity, params, ModelEntity.class);
+              }
+              int rowsUpdated = datasourceOperations.executeUpdate(query);

Review Comment:
   This call appears to run the `query` on a different connection than the 
connection that has the in-progress transaction.



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/DatasourceOperations.java:
##########
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Objects;
+import javax.sql.DataSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DatasourceOperations {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(DatasourceOperations.class);
+
+  /** Already exists error * */
+  private static final String ALREADY_EXISTS_SQL_CODE = "42P07";
+
+  /** Integrity constraint * */
+  private static final String CONSTRAINT_VIOLATION_SQL_CODE = "23505";
+
+  private final DataSource datasource;
+
+  public DatasourceOperations(DataSource datasource) {
+    this.datasource = datasource;
+  }
+
+  public void executeScript(String scriptFilePath) {
+    ClassLoader classLoader = DatasourceOperations.class.getClassLoader();
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement()) {
+      BufferedReader reader =
+          new BufferedReader(
+              new InputStreamReader(
+                  
Objects.requireNonNull(classLoader.getResourceAsStream(scriptFilePath)), 
UTF_8));
+      StringBuilder sqlBuffer = new StringBuilder();
+      String line;
+      while ((line = reader.readLine()) != null) {
+        line = line.trim();
+        if (!line.isEmpty() && !line.startsWith("--")) { // Ignore empty lines 
and comments
+          sqlBuffer.append(line).append("\n");
+          if (line.endsWith(";")) { // Execute statement when semicolon is 
found
+            String sql = sqlBuffer.toString().trim();
+            try {
+              int rowsUpdated = statement.executeUpdate(sql);
+              LOGGER.debug("Query {} executed {} rows affected", sql, 
rowsUpdated);
+            } catch (SQLException e) {
+              LOGGER.error("Error executing query {}", sql, e);
+              // re:throw this as unhandled exception
+              throw new RuntimeException(e);
+            }
+            sqlBuffer.setLength(0); // Clear the buffer for the next statement
+          }
+        }
+      }
+    } catch (IOException e) {
+      LOGGER.error("Error reading the script file", e);
+      throw new RuntimeException(e);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing the script file", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  public <T> List<T> executeSelect(String query, Class<T> targetClass) {
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement();
+        ResultSet s = statement.executeQuery(query)) {
+      List<T> results = ResultSetToObjectConverter.convert(s, targetClass);
+      return results.isEmpty() ? null : results;
+    } catch (Exception e) {
+      LOGGER.error("Error executing query {}", query, e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  public int executeUpdate(String query) {
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement()) {
+      return statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing query {}", query, e);
+      return handleException(e);
+    }
+  }
+
+  public int executeUpdate(String query, Statement statement) {
+    LOGGER.debug("Executing query {} within transaction", query);
+    try {
+      return statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing query {}", query, e);
+      return handleException(e);
+    }
+  }
+
+  public void runWithinTransaction(TransactionCallback callback) {
+    Connection connection = null;
+    try {
+      connection = borrowConnection();
+      connection.setAutoCommit(false); // Disable auto-commit to start a 
transaction
+
+      boolean result;
+      try (Statement statement = connection.createStatement()) {
+        result = callback.execute(statement);
+      }
+
+      if (result) {
+        connection.commit(); // Commit the transaction if successful
+      } else {
+        connection.rollback(); // Rollback the transaction if not successful
+      }
+
+    } catch (SQLException e) {
+      if (connection != null) {
+        try {
+          connection.rollback(); // Rollback on exception
+        } catch (SQLException ex) {
+          LOGGER.error("Error rolling back transaction", ex);
+        }
+      }
+      LOGGER.error("Caught Error while executing transaction", e);
+      handleException(e);
+    } finally {
+      if (connection != null) {
+        try {
+          connection.setAutoCommit(true); // Restore auto-commit
+          connection.close();

Review Comment:
   Could we use try-with-resources instead of closing the connection explicitly?



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/DatasourceOperations.java:
##########
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Objects;
+import javax.sql.DataSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DatasourceOperations {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(DatasourceOperations.class);
+
+  /** Already exists error * */
+  private static final String ALREADY_EXISTS_SQL_CODE = "42P07";
+
+  /** Integrity constraint * */
+  private static final String CONSTRAINT_VIOLATION_SQL_CODE = "23505";
+
+  private final DataSource datasource;
+
+  public DatasourceOperations(DataSource datasource) {
+    this.datasource = datasource;
+  }
+
+  public void executeScript(String scriptFilePath) {
+    ClassLoader classLoader = DatasourceOperations.class.getClassLoader();
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement()) {
+      BufferedReader reader =
+          new BufferedReader(
+              new InputStreamReader(
+                  
Objects.requireNonNull(classLoader.getResourceAsStream(scriptFilePath)), 
UTF_8));
+      StringBuilder sqlBuffer = new StringBuilder();
+      String line;
+      while ((line = reader.readLine()) != null) {
+        line = line.trim();
+        if (!line.isEmpty() && !line.startsWith("--")) { // Ignore empty lines 
and comments
+          sqlBuffer.append(line).append("\n");
+          if (line.endsWith(";")) { // Execute statement when semicolon is 
found
+            String sql = sqlBuffer.toString().trim();
+            try {
+              int rowsUpdated = statement.executeUpdate(sql);
+              LOGGER.debug("Query {} executed {} rows affected", sql, 
rowsUpdated);
+            } catch (SQLException e) {
+              LOGGER.error("Error executing query {}", sql, e);
+              // re:throw this as unhandled exception
+              throw new RuntimeException(e);
+            }
+            sqlBuffer.setLength(0); // Clear the buffer for the next statement
+          }
+        }
+      }
+    } catch (IOException e) {
+      LOGGER.error("Error reading the script file", e);
+      throw new RuntimeException(e);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing the script file", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  public <T> List<T> executeSelect(String query, Class<T> targetClass) {
+    try (Connection connection = borrowConnection();
+        Statement statement = connection.createStatement();
+        ResultSet s = statement.executeQuery(query)) {
+      List<T> results = ResultSetToObjectConverter.convert(s, targetClass);
+      return results.isEmpty() ? null : results;
+    } catch (Exception e) {
+      LOGGER.error("Error executing query {}", query, e);
+      throw new RuntimeException(e);
+    }
+  }
+
  /**
   * Executes a DML statement on a freshly borrowed connection (auto-commit mode).
   *
   * <p>NOTE(review): this always runs on its own connection, so it does NOT participate in any
   * transaction started by {@code runWithinTransaction}; callers inside a transaction must use
   * {@code executeUpdate(String, Statement)} with the transaction's statement instead.
   *
   * @param query the SQL to execute
   * @return the number of affected rows, or the sentinel produced by {@code handleException}
   *     (presumably -1) on SQLException — callers must check for that sentinel; the sentinel
   *     conflates constraint violations with every other SQL error
   */
  public int executeUpdate(String query) {
    try (Connection connection = borrowConnection();
        Statement statement = connection.createStatement()) {
      return statement.executeUpdate(query);
    } catch (SQLException e) {
      LOGGER.error("Error executing query {}", query, e);
      return handleException(e);
    }
  }
+
+  public int executeUpdate(String query, Statement statement) {
+    LOGGER.debug("Executing query {} within transaction", query);
+    try {
+      return statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOGGER.error("Error executing query {}", query, e);
+      return handleException(e);
+    }
+  }
+
+  public void runWithinTransaction(TransactionCallback callback) {
+    Connection connection = null;
+    try {
+      connection = borrowConnection();
+      connection.setAutoCommit(false); // Disable auto-commit to start a 
transaction
+
+      boolean result;
+      try (Statement statement = connection.createStatement()) {
+        result = callback.execute(statement);
+      }
+
+      if (result) {
+        connection.commit(); // Commit the transaction if successful
+      } else {
+        connection.rollback(); // Rollback the transaction if not successful
+      }
+
+    } catch (SQLException e) {
+      if (connection != null) {
+        try {
+          connection.rollback(); // Rollback on exception
+        } catch (SQLException ex) {
+          LOGGER.error("Error rolling back transaction", ex);
+        }
+      }
+      LOGGER.error("Caught Error while executing transaction", e);
+      handleException(e);

Review Comment:
   If `handleException` returns `-1`, the caller of `runWithinTransaction` is 
not notified of the error. Is that intended?



##########
extension/persistence/relational-jdbc/src/main/java/org/apache/polaris/extension/persistence/relational/jdbc/JdbcBasePersistenceImpl.java:
##########
@@ -0,0 +1,619 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.polaris.extension.persistence.relational.jdbc;
+
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import org.apache.polaris.core.PolarisCallContext;
+import org.apache.polaris.core.entity.EntityNameLookupRecord;
+import org.apache.polaris.core.entity.PolarisBaseEntity;
+import org.apache.polaris.core.entity.PolarisChangeTrackingVersions;
+import org.apache.polaris.core.entity.PolarisEntityCore;
+import org.apache.polaris.core.entity.PolarisEntityId;
+import org.apache.polaris.core.entity.PolarisEntityType;
+import org.apache.polaris.core.entity.PolarisGrantRecord;
+import org.apache.polaris.core.entity.PolarisPrincipalSecrets;
+import org.apache.polaris.core.persistence.BaseMetaStoreManager;
+import org.apache.polaris.core.persistence.BasePersistence;
+import org.apache.polaris.core.persistence.EntityAlreadyExistsException;
+import org.apache.polaris.core.persistence.IntegrationPersistence;
+import org.apache.polaris.core.persistence.PrincipalSecretsGenerator;
+import org.apache.polaris.core.persistence.RetryOnConcurrencyException;
+import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo;
+import org.apache.polaris.core.storage.PolarisStorageIntegration;
+import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelEntity;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelGrantRecord;
+import 
org.apache.polaris.extension.persistence.relational.jdbc.models.ModelPrincipalAuthenticationData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class JdbcBasePersistenceImpl implements BasePersistence, 
IntegrationPersistence {
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(JdbcBasePersistenceImpl.class);
+
+  private final DatasourceOperations datasourceOperations;
+  private final PrincipalSecretsGenerator secretsGenerator;
+  private final PolarisStorageIntegrationProvider storageIntegrationProvider;
+
  /**
   * Creates a JDBC-backed persistence layer.
   *
   * @param databaseOperations low-level JDBC helper used to run the generated SQL
   * @param secretsGenerator produces principal credentials
   * @param storageIntegrationProvider supplies storage integrations for catalog entities
   */
  public JdbcBasePersistenceImpl(
      DatasourceOperations databaseOperations,
      PrincipalSecretsGenerator secretsGenerator,
      PolarisStorageIntegrationProvider storageIntegrationProvider) {
    this.datasourceOperations = databaseOperations;
    this.secretsGenerator = secretsGenerator;
    this.storageIntegrationProvider = storageIntegrationProvider;
  }
+
  /**
   * Returns a new unique entity id.
   *
   * <p>Delegates to the process-wide {@code IdGenerator} singleton; the call context is unused.
   */
  @Override
  public long generateNewId(@Nonnull PolarisCallContext callCtx) {
    return IdGenerator.idGenerator.nextId();
  }
+
  /**
   * Writes a single entity: a plain INSERT when {@code originalEntity} is null, otherwise a
   * compare-and-swap UPDATE keyed on the original (id, catalog_id, entity_version).
   *
   * @param callCtx call context (unused here)
   * @param entity the new state to persist
   * @param nameOrParentChanged unused in this implementation
   * @param originalEntity prior state for CAS, or null to insert
   * @throws EntityAlreadyExistsException when the insert fails (see note below)
   * @throws RetryOnConcurrencyException when the CAS update matched no row (version moved)
   */
  @Override
  public void writeEntity(
      @Nonnull PolarisCallContext callCtx,
      @Nonnull PolarisBaseEntity entity,
      boolean nameOrParentChanged,
      PolarisBaseEntity originalEntity) {
    ModelEntity modelEntity = ModelEntity.fromEntity(entity);
    String query;
    if (originalEntity == null) {
      query = JdbcCrudQueryGenerator.generateInsertQuery(modelEntity, ModelEntity.class);
    } else {
      // CAS predicate: only update the row if it still carries the original version.
      Map<String, Object> params = new HashMap<>();
      params.put("id", originalEntity.getId());
      params.put("catalog_id", originalEntity.getCatalogId());
      params.put("entity_version", originalEntity.getEntityVersion());
      query = JdbcCrudQueryGenerator.generateUpdateQuery(modelEntity, params, ModelEntity.class);
    }
    int rowsUpdated = datasourceOperations.executeUpdate(query);
    if (rowsUpdated == -1 && originalEntity == null) {
      // -1 is the executeUpdate error sentinel. NOTE(review): it conflates constraint
      // violations with every other SQLException, so any insert failure surfaces as
      // "already exists" — TODO confirm that is intended.
      throw new EntityAlreadyExistsException(entity);
    } else if (rowsUpdated == 0 && originalEntity != null) {
      // The row's version moved concurrently; CAS matched nothing.
      // NOTE(review): a -1 (SQL error) on the update path falls through silently as
      // success — verify whether that should also raise.
      throw new RetryOnConcurrencyException("CAS failed");
    }
  }
+
+  @Override
+  public void writeEntities(
+      @Nonnull PolarisCallContext callCtx,
+      @Nonnull List<PolarisBaseEntity> entities,
+      List<PolarisBaseEntity> originalEntities) {
+    try {
+      datasourceOperations.runWithinTransaction(
+          statement -> {
+            for (int i = 0; i < entities.size(); i++) {
+              PolarisBaseEntity entity = entities.get(i);
+              ModelEntity modelEntity = ModelEntity.fromEntity(entity);
+
+              // first, check if the entity has already been created, in which 
case we will simply
+              // return it.
+              PolarisBaseEntity entityFound =
+                  lookupEntity(
+                      callCtx, entity.getCatalogId(), entity.getId(), 
entity.getTypeCode());
+              if (entityFound != null) {
+                // probably the client retried, simply return it
+                // TODO: Check correctness of returning entityFound vs entity 
here. It may have
+                // already
+                // been updated after the creation.
+                continue;
+              }
+              // lookup by name
+              EntityNameLookupRecord exists =
+                  lookupEntityIdAndSubTypeByName(
+                      callCtx,
+                      entity.getCatalogId(),
+                      entity.getParentId(),
+                      entity.getTypeCode(),
+                      entity.getName());
+              if (exists != null) {
+                throw new EntityAlreadyExistsException(entity);
+              }
+              String query;
+              if (originalEntities == null || originalEntities.get(i) == null) 
{
+                query = 
JdbcCrudQueryGenerator.generateInsertQuery(modelEntity, ModelEntity.class);
+              } else {
+                // CAS
+                Map<String, Object> params = new HashMap<>();
+                params.put("id", originalEntities.get(i).getId());
+                params.put("catalog_id", 
originalEntities.get(i).getCatalogId());
+                params.put("entity_version", 
originalEntities.get(i).getEntityVersion());
+                query =
+                    JdbcCrudQueryGenerator.generateUpdateQuery(
+                        modelEntity, params, ModelEntity.class);
+              }
+              int rowsUpdated = datasourceOperations.executeUpdate(query);
+              boolean isUpdate = (originalEntities != null && 
originalEntities.get(i) != null);
+              if (rowsUpdated == -1 && !isUpdate) {
+                // constrain validation exception.
+                throw new EntityAlreadyExistsException(entity);
+              } else if (rowsUpdated == 0 && isUpdate) {
+                throw new RetryOnConcurrencyException("CAS failed");
+              }
+            }
+            return true;
+          });
+    } catch (Exception e) {
+      LOGGER.error("Error executing transaction {}", e.getMessage());
+      throw e;
+    }
+  }
+
+  @Override
+  public void writeToGrantRecords(
+      @Nonnull PolarisCallContext callCtx, @Nonnull PolarisGrantRecord 
grantRec) {
+    ModelGrantRecord modelGrantRecord = 
ModelGrantRecord.fromGrantRecord(grantRec);
+    String query =
+        JdbcCrudQueryGenerator.generateInsertQuery(modelGrantRecord, 
ModelGrantRecord.class);
+    datasourceOperations.executeUpdate(query);
+  }
+
+  @Override
+  public void deleteEntity(@Nonnull PolarisCallContext callCtx, @Nonnull 
PolarisBaseEntity entity) {
+    ModelEntity modelEntity = ModelEntity.fromEntity(entity);
+    Map<String, Object> params = new HashMap<>();
+    params.put("id", modelEntity.getId());
+    params.put("catalog_id", modelEntity.getCatalogId());
+    datasourceOperations.executeUpdate(
+        JdbcCrudQueryGenerator.generateDeleteQuery(params, ModelEntity.class));
+  }
+
+  @Override
+  public void deleteFromGrantRecords(
+      @Nonnull PolarisCallContext callCtx, @Nonnull PolarisGrantRecord 
grantRec) {
+    ModelGrantRecord modelGrantRecord = 
ModelGrantRecord.fromGrantRecord(grantRec);
+    String query =
+        JdbcCrudQueryGenerator.generateDeleteQuery(modelGrantRecord, 
ModelGrantRecord.class);
+    datasourceOperations.executeUpdate(query);
+  }
+
+  @Override
+  public void deleteAllEntityGrantRecords(
+      @Nonnull PolarisCallContext callCtx,
+      PolarisEntityCore entity,
+      @Nonnull List<PolarisGrantRecord> grantsOnGrantee,
+      @Nonnull List<PolarisGrantRecord> grantsOnSecurable) {
+    // generate where clause
+    StringBuilder granteeCondition = new StringBuilder("(grantee_id, 
grantee_catalog_id) IN (");
+    granteeCondition.append("(" + entity.getId() + ", " + 
entity.getCatalogId() + ")");
+    granteeCondition.append(",");
+    // extra , removed
+    granteeCondition.deleteCharAt(granteeCondition.length() - 1);
+    granteeCondition.append(")");
+
+    StringBuilder securableCondition =
+        new StringBuilder("(securable_catalog_id, securable_id) IN (");
+
+    String in = "(" + entity.getCatalogId() + ", " + entity.getId() + ")";
+    securableCondition.append(in);
+    securableCondition.append(",");
+
+    // extra , removed
+    securableCondition.deleteCharAt(securableCondition.length() - 1);
+    securableCondition.append(")");
+
+    String whereClause = " WHERE " + granteeCondition + " OR " + 
securableCondition;
+    datasourceOperations.executeUpdate(
+        JdbcCrudQueryGenerator.generateDeleteQuery(ModelGrantRecord.class, 
whereClause));
+  }
+
+  @Override
+  public void deleteAll(@Nonnull PolarisCallContext callCtx) {
+    
datasourceOperations.executeUpdate(JdbcCrudQueryGenerator.generateDeleteAll(ModelEntity.class));
+    datasourceOperations.executeUpdate(
+        JdbcCrudQueryGenerator.generateDeleteAll(ModelGrantRecord.class));
+    
datasourceOperations.executeUpdate(JdbcCrudQueryGenerator.generateDeleteAll(ModelEntity.class));
+  }
+
+  @Override
+  public PolarisBaseEntity lookupEntity(
+      @Nonnull PolarisCallContext callCtx, long catalogId, long entityId, int 
typeCode) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("catalog_id", catalogId);
+    params.put("id", entityId);
+    params.put("type_code", typeCode);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class, params, null, null, "last_update_timestamp");
+    return getPolarisBaseEntity(query);
+  }
+
+  @Override
+  public PolarisBaseEntity lookupEntityByName(
+      @Nonnull PolarisCallContext callCtx,
+      long catalogId,
+      long parentId,
+      int typeCode,
+      String name) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("catalog_id", catalogId);
+    params.put("parent_id", parentId);
+    params.put("type_code", typeCode);
+
+    if (name != null) {
+      params.put("name", name);
+    }
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class, params, 1, null, "last_update_timestamp");
+    return getPolarisBaseEntity(query);
+  }
+
+  @Nullable
+  private PolarisBaseEntity getPolarisBaseEntity(String query) {
+    List<ModelEntity> results = datasourceOperations.executeSelect(query, 
ModelEntity.class);
+    return results == null || results.isEmpty() ? null : 
ModelEntity.toEntity(results.get(0));
+  }
+
+  @Nonnull
+  @Override
+  public List<PolarisBaseEntity> lookupEntities(
+      @Nonnull PolarisCallContext callCtx, List<PolarisEntityId> entityIds) {
+    if (entityIds == null || entityIds.isEmpty()) return new ArrayList<>();
+    StringBuilder condition = new StringBuilder("(catalog_id, id) IN (");
+    for (int i = 0; i < entityIds.size(); i++) {
+      String in = "(" + entityIds.get(i).getCatalogId() + ", " + 
entityIds.get(i).getId() + ")";
+      condition.append(in);
+      condition.append(",");
+    }
+    // extra , removed
+    condition.deleteCharAt(condition.length() - 1);
+    condition.append(")");
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class,
+            entityIds.isEmpty() ? "" : String.valueOf(condition),
+            null,
+            null,
+            null);
+    List<ModelEntity> results = datasourceOperations.executeSelect(query, 
ModelEntity.class);
+    return results == null
+        ? Collections.emptyList()
+        : 
results.stream().map(ModelEntity::toEntity).collect(Collectors.toList());
+  }
+
+  @Nonnull
+  @Override
+  public List<PolarisChangeTrackingVersions> lookupEntityVersions(
+      @Nonnull PolarisCallContext callCtx, List<PolarisEntityId> entityIds) {
+    Map<PolarisEntityId, ModelEntity> idToEntityMap =
+        lookupEntities(callCtx, entityIds).stream()
+            .collect(
+                Collectors.toMap(
+                    entry -> new PolarisEntityId(entry.getCatalogId(), 
entry.getId()),
+                    ModelEntity::fromEntity));
+    return entityIds.stream()
+        .map(
+            entityId -> {
+              ModelEntity entity = idToEntityMap.getOrDefault(entityId, null);
+              return entity == null
+                  ? null
+                  : new PolarisChangeTrackingVersions(
+                      entity.getEntityVersion(), 
entity.getGrantRecordsVersion());
+            })
+        .collect(Collectors.toList());
+  }
+
+  @Nonnull
+  @Override
+  public List<EntityNameLookupRecord> listEntities(
+      @Nonnull PolarisCallContext callCtx,
+      long catalogId,
+      long parentId,
+      @Nonnull PolarisEntityType entityType) {
+    return listEntities(
+        callCtx,
+        catalogId,
+        parentId,
+        entityType,
+        Integer.MAX_VALUE,
+        e -> true,
+        EntityNameLookupRecord::new);
+  }
+
+  @Nonnull
+  @Override
+  public List<EntityNameLookupRecord> listEntities(
+      @Nonnull PolarisCallContext callCtx,
+      long catalogId,
+      long parentId,
+      @Nonnull PolarisEntityType entityType,
+      @Nonnull Predicate<PolarisBaseEntity> entityFilter) {
+    return listEntities(
+        callCtx,
+        catalogId,
+        parentId,
+        entityType,
+        Integer.MAX_VALUE,
+        entityFilter,
+        EntityNameLookupRecord::new);
+  }
+
+  @Nonnull
+  @Override
+  public <T> List<T> listEntities(
+      @Nonnull PolarisCallContext callCtx,
+      long catalogId,
+      long parentId,
+      PolarisEntityType entityType,
+      int limit,
+      @Nonnull Predicate<PolarisBaseEntity> entityFilter,
+      @Nonnull Function<PolarisBaseEntity, T> transformer) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("catalog_id", catalogId);
+    params.put("parent_id", parentId);
+    params.put("type_code", entityType.getCode());
+
+    // Limit can't be pushed down, due to client side filtering
+    // absence of transaction.
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class, params, null, null, "last_update_timestamp");
+    List<ModelEntity> results = datasourceOperations.executeSelect(query, 
ModelEntity.class);
+    return results == null
+        ? Collections.emptyList()
+        : results.stream()
+            .map(ModelEntity::toEntity)
+            .filter(entityFilter)
+            .map(transformer)
+            .limit(limit)
+            .collect(Collectors.toList());
+  }
+
+  @Override
+  public int lookupEntityGrantRecordsVersion(
+      @Nonnull PolarisCallContext callCtx, long catalogId, long entityId) {
+
+    Map<String, Object> params = new HashMap<>();
+    params.put("catalog_id", catalogId);
+    params.put("id", entityId);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class, params, null, null, "last_update_timestamp");
+    PolarisBaseEntity b = getPolarisBaseEntity(query);
+    return b == null ? 0 : b.getGrantRecordsVersion();
+  }
+
+  @Override
+  public PolarisGrantRecord lookupGrantRecord(
+      @Nonnull PolarisCallContext callCtx,
+      long securableCatalogId,
+      long securableId,
+      long granteeCatalogId,
+      long granteeId,
+      int privilegeCode) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("securable_catalog_id", securableCatalogId);
+    params.put("securable_id", securableId);
+    params.put("grantee_catalog_id", granteeCatalogId);
+    params.put("grantee_id", granteeId);
+    params.put("privilege_code", privilegeCode);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelGrantRecord.class, params, null, null, null);
+    List<ModelGrantRecord> results =
+        datasourceOperations.executeSelect(query, ModelGrantRecord.class);
+    return results == null ? null : 
ModelGrantRecord.toGrantRecord(results.get(0));
+  }
+
+  @Nonnull
+  @Override
+  public List<PolarisGrantRecord> loadAllGrantRecordsOnSecurable(
+      @Nonnull PolarisCallContext callCtx, long securableCatalogId, long 
securableId) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("securable_catalog_id", securableCatalogId);
+    params.put("securable_id", securableId);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelGrantRecord.class, params, null, null, null);
+    List<ModelGrantRecord> results =
+        datasourceOperations.executeSelect(query, ModelGrantRecord.class);
+    return results == null
+        ? Collections.emptyList()
+        : 
results.stream().map(ModelGrantRecord::toGrantRecord).collect(Collectors.toList());
+  }
+
+  @Nonnull
+  @Override
+  public List<PolarisGrantRecord> loadAllGrantRecordsOnGrantee(
+      @Nonnull PolarisCallContext callCtx, long granteeCatalogId, long 
granteeId) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("grantee_catalog_id", granteeCatalogId);
+    params.put("grantee_id", granteeId);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelGrantRecord.class, params, null, null, null);
+    List<ModelGrantRecord> results =
+        datasourceOperations.executeSelect(query, ModelGrantRecord.class);
+    return results == null
+        ? Collections.emptyList()
+        : 
results.stream().map(ModelGrantRecord::toGrantRecord).collect(Collectors.toList());
+  }
+
+  @Override
+  public boolean hasChildren(
+      @Nonnull PolarisCallContext callContext,
+      PolarisEntityType optionalEntityType,
+      long catalogId,
+      long parentId) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("catalog_id", catalogId);
+    params.put("parent_id", parentId);
+    if (optionalEntityType != null) {
+      params.put("type_code", optionalEntityType.getCode());
+    }
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelEntity.class, params, null, null, "last_update_timestamp");
+    List<ModelEntity> results = datasourceOperations.executeSelect(query, 
ModelEntity.class);
+
+    return results != null && !results.isEmpty();
+  }
+
+  @Nullable
+  @Override
+  public PolarisPrincipalSecrets loadPrincipalSecrets(
+      @Nonnull PolarisCallContext callCtx, @Nonnull String clientId) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("principal_client_id", clientId);
+    String query =
+        JdbcCrudQueryGenerator.generateSelectQuery(
+            ModelPrincipalAuthenticationData.class, params, null, null, null);
+    List<ModelPrincipalAuthenticationData> results =
+        datasourceOperations.executeSelect(query, 
ModelPrincipalAuthenticationData.class);
+    return results == null || results.isEmpty()
+        ? null
+        : results.stream()
+            .map(ModelPrincipalAuthenticationData::toPrincipalSecrets)
+            .toList()
+            .getFirst();
+  }
+
+  @Nonnull
+  @Override
+  public PolarisPrincipalSecrets generateNewPrincipalSecrets(
+      @Nonnull PolarisCallContext callCtx, @Nonnull String principalName, long 
principalId) {
+    // ensure principal client id is unique
+    PolarisPrincipalSecrets principalSecrets;
+    ModelPrincipalAuthenticationData lookupPrincipalSecrets;
+    do {
+      // generate new random client id and secrets
+      principalSecrets = secretsGenerator.produceSecrets(principalName, 
principalId);

Review Comment:
   These secrets are not necessarily random. If not random and we have a clash, 
this loop will never end, right?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@polaris.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to