wu-sheng commented on code in PR #10544:
URL: https://github.com/apache/skywalking/pull/10544#discussion_r1138578783
##########
oap-server/server-storage-plugin/storage-jdbc-hikaricp-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/jdbc/common/dao/JDBCHistoryDeleteDAO.java:
##########
@@ -18,66 +18,100 @@
package org.apache.skywalking.oap.server.storage.plugin.jdbc.common.dao;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
+import lombok.RequiredArgsConstructor;
+import lombok.SneakyThrows;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.skywalking.oap.server.core.analysis.DownSampling;
+import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.core.storage.IHistoryDeleteDAO;
import org.apache.skywalking.oap.server.core.storage.model.Model;
-import
org.apache.skywalking.oap.server.core.storage.model.SQLDatabaseModelExtension;
-import
org.apache.skywalking.oap.server.library.client.jdbc.JDBCClientException;
-import
org.apache.skywalking.oap.server.library.client.jdbc.hikaricp.JDBCHikariCPClient;
+import
org.apache.skywalking.oap.server.library.client.jdbc.hikaricp.JDBCClient;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.SQLBuilder;
+import
org.apache.skywalking.oap.server.storage.plugin.jdbc.common.JDBCTableInstaller;
+import org.apache.skywalking.oap.server.storage.plugin.jdbc.common.TableHelper;
import org.joda.time.DateTime;
-import lombok.RequiredArgsConstructor;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+@Slf4j
@RequiredArgsConstructor
public class JDBCHistoryDeleteDAO implements IHistoryDeleteDAO {
- private final JDBCHikariCPClient jdbcClient;
+ private final JDBCClient jdbcClient;
+ private final TableHelper tableHelper;
+ private final JDBCTableInstaller modelInstaller;
+
+ private final Map<Pair<Model, String>, Boolean> tableExistenceCache = new
ConcurrentHashMap<>();
@Override
- public void deleteHistory(Model model, String timeBucketColumnName, int
ttl) throws IOException {
- SQLBuilder dataDeleteSQL = new SQLBuilder("delete from " +
model.getName() + " where ")
- .append(timeBucketColumnName).append("<= ? ")
- .append(" and ")
- .append(timeBucketColumnName).append(">= ? ");
+ @SneakyThrows
+ public void deleteHistory(Model model, String timeBucketColumnName, int
ttl) {
+ final var endTimeBucket =
TimeBucket.getTimeBucket(System.currentTimeMillis(), DownSampling.Day);
+ final var startTimeBucket = endTimeBucket - ttl;
+ log.info(
+ "Deleting history data, ttl: {}, now: {}. Keep [{}, {}]",
+ ttl,
+ System.currentTimeMillis(),
+ startTimeBucket,
+ endTimeBucket
+ );
+
+ final var ttlTables = tableHelper.getTablesForRead(model.getName(),
startTimeBucket, endTimeBucket);
+ final var tablesToDrop = new HashSet<String>();
- try (Connection connection = jdbcClient.getConnection()) {
- long deadline;
- long minTime;
- if (model.isRecord()) {
- deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHHmmss"));
- minTime = 1000_00_00_00_00_00L;
- } else {
- switch (model.getDownsampling()) {
- case Minute:
- deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHHmm"));
- minTime = 1000_00_00_00_00L;
- break;
- case Hour:
- deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHH"));
- minTime = 1000_00_00_00L;
- break;
- case Day:
- deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMdd"));
- minTime = 1000_00_00L;
- break;
- default:
- return;
- }
+ try (final var conn = jdbcClient.getConnection();
+ final var result = conn.getMetaData().getTables(null, null,
TableHelper.getTableName(model) + "%", new String[]{"TABLE"})) {
+ while (result.next()) {
+ tablesToDrop.add(result.getString("TABLE_NAME"));
}
- jdbcClient.executeUpdate(connection, dataDeleteSQL.toString(),
deadline, minTime);
- // Delete additional tables
- for (SQLDatabaseModelExtension.AdditionalTable additionalTable :
model.getSqlDBModelExtension()
-
.getAdditionalTables()
-
.values()) {
- SQLBuilder additionalTableDeleteSQL = new SQLBuilder("delete
from " + additionalTable.getName() + " where ")
- .append(timeBucketColumnName).append("<= ? ")
- .append(" and ")
- .append(timeBucketColumnName).append(">= ? ");
- jdbcClient.executeUpdate(connection,
additionalTableDeleteSQL.toString(), deadline, minTime);
+ }
+
+ ttlTables.forEach(tablesToDrop::remove);
+ tablesToDrop.removeIf(it -> !it.matches(".*_\\d{8}"));
+ for (String table : tablesToDrop) {
+ final var dropSql = new SQLBuilder("drop table if exists
").append(table);
+ jdbcClient.executeUpdate(dropSql.toString());
+ }
+
+ long deadline;
+ long minTime;
+ if (model.isRecord()) {
+ deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHHmmss"));
+ minTime = 1000_00_00_00_00_00L;
+ } else {
+ switch (model.getDownsampling()) {
+ case Minute:
+ deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHHmm"));
+ minTime = 1000_00_00_00_00L;
+ break;
+ case Hour:
+ deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMddHH"));
+ minTime = 1000_00_00_00L;
+ break;
+ case Day:
+ deadline = Long.parseLong(new
DateTime().plusDays(-ttl).toString("yyyyMMdd"));
+ minTime = 1000_00_00L;
+ break;
+ default:
+ return;
}
- } catch (JDBCClientException | SQLException e) {
- throw new IOException(e.getMessage(), e);
+ }
+ // Delete data in additional tables
+ for (final var additionalTable :
model.getSqlDBModelExtension().getAdditionalTables().values()) {
+ SQLBuilder additionalTableDeleteSQL = new SQLBuilder("delete from
" + additionalTable.getName() + " where ")
Review Comment:
Why `delete` rather than `drop`? I think the additional tables should follow
the same time-series table lifecycle policy (drop expired tables rather than delete rows).
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]