zerolbsony commented on code in PR #17279:
URL: https://github.com/apache/iotdb/pull/17279#discussion_r2958151938
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java:
##########
@@ -3117,4 +3136,350 @@ public TSStatus writeAuditLog(TAuditLogReq req) {
public void handleClientExit() {
// Do nothing
}
+
+ // ====================================================
+ // Data Partition Table Integrity Check Implementation
+ // ====================================================
+
+ private volatile DataPartitionTableGenerator currentGenerator;
+ private volatile CompletableFuture<Void> currentGeneratorFuture;
+ private volatile long currentTaskId = 0;
+
+ @Override
+ public TGetEarliestTimeslotsResp getEarliestTimeslots() {
+ TGetEarliestTimeslotsResp resp = new TGetEarliestTimeslotsResp();
+
+ try {
+ Map<String, Long> earliestTimeslots = new HashMap<>();
+
+ // Get data directories from configuration
+ String[] dataDirs =
IoTDBDescriptor.getInstance().getConfig().getDataDirs();
+
+ for (String dataDir : dataDirs) {
+ File dir = new File(dataDir);
+ if (dir.exists() && dir.isDirectory()) {
+ processDataDirectoryForEarliestTimeslots(dir, earliestTimeslots);
+ }
+ }
+
+ resp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
+ resp.setDatabaseToEarliestTimeslot(earliestTimeslots);
+
+ LOGGER.info("Retrieved earliest timeslots for {} databases",
earliestTimeslots.size());
+
+ } catch (Exception e) {
+ LOGGER.error("Failed to get earliest timeslots", e);
+ resp.setStatus(
+ onIoTDBException(
+ e,
+ OperationType.GET_EARLIEST_TIMESLOTS,
+ TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()));
+ }
+
+ return resp;
+ }
+
+ @Override
+ public TGenerateDataPartitionTableResp generateDataPartitionTable(
+ TGenerateDataPartitionTableReq req) {
+ TGenerateDataPartitionTableResp resp = new
TGenerateDataPartitionTableResp();
+
+ try {
+ // Check if there's already a task in the progress
+ if (currentGenerator != null
+ && currentGenerator.getStatus() ==
DataPartitionTableGenerator.TaskStatus.IN_PROGRESS) {
+
resp.setErrorCode(DataPartitionTableGeneratorState.IN_PROGRESS.getCode());
+ resp.setMessage("DataPartitionTable generation is already in the
progress");
+ resp.setStatus(RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR));
+ return resp;
+ }
+
+ // Create generator for all data directories
+ int seriesSlotNum =
IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionSlotNum();
+ String seriesPartitionExecutorClass =
+
IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionExecutorClass();
+
+ final ExecutorService partitionTableRecoverExecutor =
+ new WrappedThreadPoolExecutor(
+ 0,
+
IoTDBDescriptor.getInstance().getConfig().getPartitionTableRecoverWorkerNum(),
+ 0L,
+ TimeUnit.SECONDS,
+ new ArrayBlockingQueue<>(
+
IoTDBDescriptor.getInstance().getConfig().getPartitionTableRecoverWorkerNum()),
+ new
IoTThreadFactory(ThreadName.DATA_PARTITION_RECOVER_PARALLEL_POOL.getName()),
+ ThreadName.DATA_PARTITION_RECOVER_PARALLEL_POOL.getName(),
+ new ThreadPoolExecutor.CallerRunsPolicy());
+
+ currentGenerator =
+ new DataPartitionTableGenerator(
+ partitionTableRecoverExecutor,
+ req.getDatabases(),
+ seriesSlotNum,
+ seriesPartitionExecutorClass);
+ currentTaskId = System.currentTimeMillis();
+
+ // Start generation synchronously for now to return the data partition
table immediately
+ currentGeneratorFuture = currentGenerator.startGeneration();
+ parseGenerationStatus(resp);
Review Comment:
> Add some synchronization to avoid creating multiple generators if you do
not intend to guarantee that only the CN leader will issue the repair.
Changed now; see here:
<img width="2682" height="1083" alt="image"
src="https://github.com/user-attachments/assets/49ca3d01-87ad-4dc8-bbf0-f4923cfbd66b"
/>
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]