FANNG1 commented on code in PR #9010:
URL: https://github.com/apache/gravitino/pull/9010#discussion_r2493438567


##########
iceberg/iceberg-rest-server/src/main/java/org/apache/gravitino/iceberg/service/metrics/IcebergMetricsManager.java:
##########
@@ -156,21 +160,22 @@ IcebergMetricsStore getIcebergMetricsStore() {
 
   private void writeMetrics() {
     while (!Thread.currentThread().isInterrupted()) {
-      MetricsReport metricsReport;
+      MetricsReportWrapper metricsReport;
       try {
         metricsReport = queue.take();
       } catch (InterruptedException e) {
         LOG.warn("Iceberg Metrics writer thread is interrupted.");
         break;
       }
-      if (metricsReport != null) {
-        doRecordMetric(metricsReport);
-      }
+
+      doRecordMetric(metricsReport.getNamespace(), 
metricsReport.getMetricsReport());

Review Comment:
   Any reason to remove the nullability check?



##########
iceberg/iceberg-rest-server/src/main/java/org/apache/gravitino/iceberg/service/metrics/JDBCMetricsStore.java:
##########
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.iceberg.service.metrics;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.time.Instant;
+import java.util.Map;
+import java.util.function.Consumer;
+import org.apache.gravitino.catalog.lakehouse.iceberg.IcebergPropertiesUtils;
+import org.apache.gravitino.json.JsonUtils;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.jdbc.JdbcClientPool;
+import org.apache.iceberg.jdbc.UncheckedInterruptedException;
+import org.apache.iceberg.jdbc.UncheckedSQLException;
+import org.apache.iceberg.metrics.CommitReport;
+import org.apache.iceberg.metrics.CounterResult;
+import org.apache.iceberg.metrics.MetricsReport;
+import org.apache.iceberg.metrics.ScanReport;
+import org.apache.iceberg.metrics.TimerResult;
+
+public class JDBCMetricsStore implements IcebergMetricsStore {
+  public static final String ICEBERG_METRICS_STORE_JDBC_NAME = "jdbc";
+  private static final String URI = "uri";
+  private static final String INSERT_COMMIT_REPORT_METRICS_SQL =
+      "INSERT INTO commit_metrics_report ("
+          + "timestamp, namespace, table_name, snapshot_id, sequence_number, 
operation,"
+          + "added_data_files, removed_data_files, total_data_files,"
+          + "added_delete_files, added_equality_delete_files,  
added_positional_delete_files, "
+          + "removed_delete_files, removed_equality_delete_files, 
removed_positional_delete_files, total_delete_files,"
+          + "added_records, removed_records, total_records,"
+          + "added_file_size_bytes, removed_file_size_bytes, 
total_file_size_bytes,"
+          + "added_positional_deletes, removed_positional_deletes, 
total_positional_deletes,"
+          + "added_equality_deletes, removed_equality_deletes, 
total_equality_deletes,"
+          + "manifests_created, manifests_replaced, manifests_kept, 
manifest_entries_processed,"
+          + "added_dvs, removed_dvs,"
+          + "total_duration_ms, attempts, metadata"
+          + ") VALUES "
+          + "(?, ?, ?, ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?, ?,"
+          + " ?, ?,"
+          + " ?, ?, ?);";
+
+  private static final String INSERT_SCAN_REPORT_METRICS_SQL =
+      "INSERT INTO scan_metrics_report ("
+          + "timestamp, namespace, table_name, snapshot_id, schema_id, "
+          + "filter, metadata, projected_field_ids, projected_field_names, "
+          + "equality_delete_files, indexed_delete_files, 
positional_delete_files, "
+          + "result_data_files, result_delete_files, "
+          + "scanned_data_manifests, scanned_delete_manifests, "
+          + "skipped_data_files, skipped_data_manifests, "
+          + "skipped_delete_files, skipped_delete_manifests, "
+          + "total_data_manifests, total_delete_file_size_in_bytes, "
+          + "total_delete_manifests, total_file_size_in_bytes,"
+          + "total_planning_duration_ms) VALUES "
+          + "(?, ?, ?, ?, ?,"
+          + "?, ?, ?, ?,"
+          + "?, ? ,?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?);";
+
+  private static final String DELETE_EXPIRED_SCAN_METRICS_SQL =
+      "DELETE FROM scan_metrics_report WHERE timestamp < ?; ";
+
+  private static final String DELETE_EXPIRED_COMMIT_METRICS_SQL =
+      "DELETE FROM commit_metrics_report WHERE timestamp < ?;";
+
+  @VisibleForTesting JdbcClientPool connections;
+
+  @Override
+  public void init(Map<String, String> properties) throws IOException {
+    String uri = properties.get(URI);
+    Preconditions.checkArgument(uri != null, "JDBC metrics store requires a 
\"%s\" property", URI);
+
+    connections =
+        new JdbcClientPool(uri, 
IcebergPropertiesUtils.toIcebergCatalogProperties(properties));
+  }
+
+  @Override
+  public void recordMetric(Namespace namespace, MetricsReport metricsReport) 
throws IOException {

Review Comment:
   Besides `namespace`, should we record catalog too?



##########
iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/service/metrics/TestJdbcMetricsStore.java:
##########
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.iceberg.service.metrics;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import java.io.File;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.io.FileUtils;
+import org.apache.gravitino.integration.test.container.ContainerSuite;
+import org.apache.gravitino.integration.test.util.TestDatabaseName;
+import org.apache.iceberg.ClientPool;
+import org.apache.iceberg.SnapshotSummary;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.metrics.CommitMetrics;
+import org.apache.iceberg.metrics.CommitMetricsResult;
+import org.apache.iceberg.metrics.CommitReport;
+import org.apache.iceberg.metrics.DefaultMetricsContext;
+import org.apache.iceberg.metrics.ImmutableCommitReport;
+import org.apache.iceberg.metrics.ImmutableScanReport;
+import org.apache.iceberg.metrics.ScanMetrics;
+import org.apache.iceberg.metrics.ScanMetricsResult;
+import org.apache.iceberg.metrics.ScanReport;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+@Tag("gravitino-docker-test")
+public class TestJdbcMetricsStore {
+
+  private static final String CURRENT_SCRIPT_VERSION = "1.1.0";
+  ContainerSuite containerSuite = ContainerSuite.getInstance();
+
+  @Test
+  public void testJdbcMetricsStore() throws Exception {
+    // Start container
+    containerSuite.startMySQLContainer(TestDatabaseName.MYSQL_JDBC_BACKEND);
+    containerSuite.startPostgreSQLContainer(TestDatabaseName.PG_JDBC_BACKEND);
+
+    // Prepare test configurations
+    Map<String, Map<String, String>> dbProperties = Maps.newHashMap();
+    Map<String, String> h2Properties = Maps.newHashMap();
+    h2Properties.put("jdbc-driver", "org.h2.Driver");
+    h2Properties.put("uri", "jdbc:h2:mem:testdb;DB_CLOSE_DELAY=-1;MODE=MYSQL");
+    dbProperties.put("h2", h2Properties);
+
+    Map<String, String> mysqlProperties = Maps.newHashMap();
+    mysqlProperties.put("jdbc-driver", "com.mysql.cj.jdbc.Driver");
+    mysqlProperties.put(
+        "uri", 
containerSuite.getMySQLContainer().getJdbcUrl(TestDatabaseName.MYSQL_JDBC_BACKEND));
+    mysqlProperties.put("jdbc-user", 
containerSuite.getMySQLContainer().getUsername());
+    mysqlProperties.put("jdbc-password", 
containerSuite.getMySQLContainer().getPassword());
+    dbProperties.put("mysql", mysqlProperties);
+    Map<String, String> postgresqlProperties = Maps.newHashMap();
+    postgresqlProperties.put("jdbc-driver", "org.postgresql.Driver");
+    postgresqlProperties.put("uri", 
containerSuite.getPostgreSQLContainer().getJdbcUrl());
+    postgresqlProperties.put("jdbc-user", 
containerSuite.getPostgreSQLContainer().getUsername());
+    postgresqlProperties.put(
+        "jdbc-password", 
containerSuite.getPostgreSQLContainer().getPassword());
+    dbProperties.put("postgresql", postgresqlProperties);
+
+    CommitMetrics commitMetrics = CommitMetrics.of(new 
DefaultMetricsContext());
+    commitMetrics.totalDuration().record(100, TimeUnit.SECONDS);
+    commitMetrics.attempts().increment(4);
+    Map<String, String> snapshotSummary =
+        ImmutableMap.<String, String>builder()
+            .put(SnapshotSummary.ADDED_FILES_PROP, "1")
+            .put(SnapshotSummary.DELETED_FILES_PROP, "2")
+            .put(SnapshotSummary.TOTAL_DATA_FILES_PROP, "3")
+            .put(SnapshotSummary.ADDED_DELETE_FILES_PROP, "4")
+            .put(SnapshotSummary.ADD_EQ_DELETE_FILES_PROP, "5")
+            .put(SnapshotSummary.ADD_POS_DELETE_FILES_PROP, "6")
+            .put(SnapshotSummary.REMOVED_POS_DELETE_FILES_PROP, "7")
+            .put(SnapshotSummary.REMOVED_EQ_DELETE_FILES_PROP, "8")
+            .put(SnapshotSummary.REMOVED_DELETE_FILES_PROP, "9")
+            .put(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "10")
+            .put(SnapshotSummary.ADDED_RECORDS_PROP, "11")
+            .put(SnapshotSummary.DELETED_RECORDS_PROP, "12")
+            .put(SnapshotSummary.TOTAL_RECORDS_PROP, "13")
+            .put(SnapshotSummary.ADDED_FILE_SIZE_PROP, "14")
+            .put(SnapshotSummary.REMOVED_FILE_SIZE_PROP, "15")
+            .put(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "16")
+            .put(SnapshotSummary.ADDED_POS_DELETES_PROP, "17")
+            .put(SnapshotSummary.ADDED_EQ_DELETES_PROP, "18")
+            .put(SnapshotSummary.REMOVED_POS_DELETES_PROP, "19")
+            .put(SnapshotSummary.REMOVED_EQ_DELETES_PROP, "20")
+            .put(SnapshotSummary.TOTAL_POS_DELETES_PROP, "21")
+            .put(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "22")
+            .build();
+
+    String tableName = "tableName";
+    CommitReport commitReport =
+        ImmutableCommitReport.builder()
+            .tableName(tableName)
+            .snapshotId(23L)
+            .operation("DELETE")
+            .sequenceNumber(4L)
+            .commitMetrics(CommitMetricsResult.from(commitMetrics, 
snapshotSummary))
+            .build();
+
+    ScanMetrics scanMetrics = ScanMetrics.of(new DefaultMetricsContext());
+    scanMetrics.totalPlanningDuration().record(10, TimeUnit.MINUTES);
+    scanMetrics.resultDataFiles().increment(5L);
+    scanMetrics.resultDeleteFiles().increment(5L);
+    scanMetrics.scannedDataManifests().increment(5L);
+    scanMetrics.totalFileSizeInBytes().increment(1024L);
+    scanMetrics.totalDataManifests().increment(5L);
+
+    int schemaId = 4;
+    List<Integer> fieldIds = Arrays.asList(1, 2);
+    List<String> fieldNames = Arrays.asList("c1", "c2");
+
+    ScanReport scanReport =
+        ImmutableScanReport.builder()
+            .tableName(tableName)
+            .snapshotId(23L)
+            .filter(Expressions.alwaysTrue())
+            .schemaId(schemaId)
+            .projectedFieldIds(fieldIds)
+            .projectedFieldNames(fieldNames)
+            .scanMetrics(ScanMetricsResult.fromScanMetrics(scanMetrics))
+            .build();
+
+    for (Map.Entry<String, Map<String, String>> entry : 
dbProperties.entrySet()) {
+      JDBCMetricsStore metricsStore = new JDBCMetricsStore();
+      metricsStore.init(entry.getValue());
+
+      String gravitinoHome = System.getenv("GRAVITINO_ROOT_DIR");
+      String mysqlContent =
+          FileUtils.readFileToString(
+              new File(
+                  gravitinoHome
+                      + String.format(
+                          "/scripts/%s/iceberg-metrics-schema-%s-%s.sql",
+                          entry.getKey(), CURRENT_SCRIPT_VERSION, 
entry.getKey())),
+              "UTF-8");
+
+      String[] sqls =
+          Arrays.stream(mysqlContent.split(";"))
+              .map(String::trim)
+              .filter(s -> !s.isEmpty())
+              .toArray(String[]::new);
+      // Init the store to create tables
+      for (String sql : sqls) {
+        metricsStore.execute(sql);
+      }
+
+      metricsStore.recordMetric(Namespace.of("a"), commitReport);
+      metricsStore.recordMetric(Namespace.of("b"), scanReport);
+
+      String countSql = "SELECT COUNT(*) AS total FROM commit_metrics_report";

Review Comment:
   Besides the row-count test, could you add some assertions on the metric 
contents, like the number of data files, etc.?



##########
iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/service/metrics/TestJdbcMetricsStore.java:
##########
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.iceberg.service.metrics;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import java.io.File;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.io.FileUtils;
+import org.apache.gravitino.integration.test.container.ContainerSuite;
+import org.apache.gravitino.integration.test.util.TestDatabaseName;
+import org.apache.iceberg.ClientPool;
+import org.apache.iceberg.SnapshotSummary;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.metrics.CommitMetrics;
+import org.apache.iceberg.metrics.CommitMetricsResult;
+import org.apache.iceberg.metrics.CommitReport;
+import org.apache.iceberg.metrics.DefaultMetricsContext;
+import org.apache.iceberg.metrics.ImmutableCommitReport;
+import org.apache.iceberg.metrics.ImmutableScanReport;
+import org.apache.iceberg.metrics.ScanMetrics;
+import org.apache.iceberg.metrics.ScanMetricsResult;
+import org.apache.iceberg.metrics.ScanReport;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+@Tag("gravitino-docker-test")
+public class TestJdbcMetricsStore {
+
+  private static final String CURRENT_SCRIPT_VERSION = "1.1.0";
+  ContainerSuite containerSuite = ContainerSuite.getInstance();
+
+  @Test
+  public void testJdbcMetricsStore() throws Exception {
+    // Start container
+    containerSuite.startMySQLContainer(TestDatabaseName.MYSQL_JDBC_BACKEND);

Review Comment:
   Could you move the environment setup logic out of the test method to make 
the code simpler?



##########
docs/iceberg-rest-service.md:
##########
@@ -425,6 +425,14 @@ Gravitino provides a pluggable metrics store interface to 
store and delete Icebe
 | `gravitino.iceberg-rest.metricsStoreRetainDays` | The days to retain Iceberg 
metrics in store, the value not greater than 0 means retain forever.            
                         | -1            | No       | 0.4.0         |
 | `gravitino.iceberg-rest.metricsQueueCapacity`   | The size of queue to store 
metrics temporally before storing to the persistent storage. Metrics will be 
dropped when queue is full. | 1000          | No       | 0.4.0         |
 
+If you want to use jdbc as metrics store, you can set the 
`gravitino.iceberg-rest.metricsStore` to `jdbc`, and set the following 
configurations to connect to the database. You should initialize the database 
using the sql scripts in the directory `scripts`.
+
+| Configuration item                         | Description                     
                                                                            | 
Default value     | Required | Since Version |
+|--------------------------------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|---------------|
+| `gravitino.iceberg-rest.jdbc-user`         | The username of the JDBC 
connection.                                                                     
   | (none)            | No       | 0.2.0         |

Review Comment:
   missing `uri`?



##########
iceberg/iceberg-rest-server/src/main/java/org/apache/gravitino/iceberg/service/metrics/JDBCMetricsStore.java:
##########
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.iceberg.service.metrics;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.time.Instant;
+import java.util.Map;
+import java.util.function.Consumer;
+import org.apache.gravitino.catalog.lakehouse.iceberg.IcebergPropertiesUtils;
+import org.apache.gravitino.json.JsonUtils;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.jdbc.JdbcClientPool;
+import org.apache.iceberg.jdbc.UncheckedInterruptedException;
+import org.apache.iceberg.jdbc.UncheckedSQLException;
+import org.apache.iceberg.metrics.CommitReport;
+import org.apache.iceberg.metrics.CounterResult;
+import org.apache.iceberg.metrics.MetricsReport;
+import org.apache.iceberg.metrics.ScanReport;
+import org.apache.iceberg.metrics.TimerResult;
+
+public class JDBCMetricsStore implements IcebergMetricsStore {
+  public static final String ICEBERG_METRICS_STORE_JDBC_NAME = "jdbc";
+  private static final String URI = "uri";
+  private static final String INSERT_COMMIT_REPORT_METRICS_SQL =
+      "INSERT INTO commit_metrics_report ("
+          + "timestamp, namespace, table_name, snapshot_id, sequence_number, 
operation,"
+          + "added_data_files, removed_data_files, total_data_files,"
+          + "added_delete_files, added_equality_delete_files,  
added_positional_delete_files, "
+          + "removed_delete_files, removed_equality_delete_files, 
removed_positional_delete_files, total_delete_files,"
+          + "added_records, removed_records, total_records,"
+          + "added_file_size_bytes, removed_file_size_bytes, 
total_file_size_bytes,"
+          + "added_positional_deletes, removed_positional_deletes, 
total_positional_deletes,"
+          + "added_equality_deletes, removed_equality_deletes, 
total_equality_deletes,"
+          + "manifests_created, manifests_replaced, manifests_kept, 
manifest_entries_processed,"
+          + "added_dvs, removed_dvs,"
+          + "total_duration_ms, attempts, metadata"
+          + ") VALUES "
+          + "(?, ?, ?, ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?,"
+          + " ?, ?, ?, ?,"
+          + " ?, ?,"
+          + " ?, ?, ?);";
+
+  private static final String INSERT_SCAN_REPORT_METRICS_SQL =
+      "INSERT INTO scan_metrics_report ("
+          + "timestamp, namespace, table_name, snapshot_id, schema_id, "
+          + "filter, metadata, projected_field_ids, projected_field_names, "
+          + "equality_delete_files, indexed_delete_files, 
positional_delete_files, "
+          + "result_data_files, result_delete_files, "
+          + "scanned_data_manifests, scanned_delete_manifests, "
+          + "skipped_data_files, skipped_data_manifests, "
+          + "skipped_delete_files, skipped_delete_manifests, "
+          + "total_data_manifests, total_delete_file_size_in_bytes, "
+          + "total_delete_manifests, total_file_size_in_bytes,"
+          + "total_planning_duration_ms) VALUES "
+          + "(?, ?, ?, ?, ?,"
+          + "?, ?, ?, ?,"
+          + "?, ? ,?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?, ?,"
+          + "?);";
+
+  private static final String DELETE_EXPIRED_SCAN_METRICS_SQL =
+      "DELETE FROM scan_metrics_report WHERE timestamp < ?; ";
+
+  private static final String DELETE_EXPIRED_COMMIT_METRICS_SQL =
+      "DELETE FROM commit_metrics_report WHERE timestamp < ?;";
+
+  @VisibleForTesting JdbcClientPool connections;
+
+  @Override
+  public void init(Map<String, String> properties) throws IOException {

Review Comment:
   Is it possible to create the metrics tables automatically?



##########
docs/iceberg-rest-service.md:
##########
@@ -425,6 +425,14 @@ Gravitino provides a pluggable metrics store interface to 
store and delete Icebe
 | `gravitino.iceberg-rest.metricsStoreRetainDays` | The days to retain Iceberg 
metrics in store, the value not greater than 0 means retain forever.            
                         | -1            | No       | 0.4.0         |
 | `gravitino.iceberg-rest.metricsQueueCapacity`   | The size of queue to store 
metrics temporally before storing to the persistent storage. Metrics will be 
dropped when queue is full. | 1000          | No       | 0.4.0         |
 
+If you want to use jdbc as metrics store, you can set the 
`gravitino.iceberg-rest.metricsStore` to `jdbc`, and set the following 
configurations to connect to the database. You should initialize the database 
using the sql scripts in the directory `scripts`.
+
+| Configuration item                         | Description                     
                                                                            | 
Default value     | Required | Since Version |
+|--------------------------------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|---------------|
+| `gravitino.iceberg-rest.jdbc-user`         | The username of the JDBC 
connection.                                                                     
   | (none)            | No       | 0.2.0         |

Review Comment:
   Could you add a prefix to the `jdbc-xx` keys, like 
`gravitino.iceberg-rest.jdbc-metrics.jdbc-user`? The current `jdbc-user` is 
used for the catalog backend, and users may not want to use the same RDBMS 
instance for metrics.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to