jt2594838 commented on a change in pull request #1956:
URL: https://github.com/apache/iotdb/pull/1956#discussion_r519516225
##########
File path:
cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
##########
@@ -945,23 +1008,34 @@ private int comparePersistLogFileName(File file1, File
file2) {
if (endIndex - startIndex > maxNumberOfLogsPerFetchOnDisk) {
newEndIndex = startIndex + maxNumberOfLogsPerFetchOnDisk;
}
- logger.debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
- endIndex, startIndex, newEndIndex);
+ logger
+ .debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
+ endIndex, startIndex, newEndIndex);
- List<Pair<File, Pair<Long, Long>>> logDataFileAndOffsetList =
getLogDataFileAndOffset(
- startIndex, newEndIndex);
- if (logDataFileAndOffsetList.isEmpty()) {
- return Collections.emptyList();
- }
+ // maybe the logs will be delete during checkDeletePersistRaftLog or
clearAllLogs,
Review comment:
"will be deleted"
##########
File path:
cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
##########
@@ -835,6 +834,59 @@ public void close() {
}
}
+ @Override
+ public void clearAllLogs(long commitIndex) {
+ lock.writeLock().lock();
+ try {
+ // 1. delete
+ forceFlushLogBuffer();
+ closeCurrentFile(meta.getCommitLogIndex());
+ for (int i = 0; i < logDataFileList.size(); i++) {
+ deleteLogDataAndIndexFile(i);
+ }
+ deleteMetaFile();
+
+ logDataFileList.clear();
+ logIndexFileList.clear();
+
+ // 2. init
+ if (!logIndexOffsetList.isEmpty()) {
+ this.firstLogIndex = Math
+ .max(commitIndex + 1, firstLogIndex + logIndexOffsetList.size());
+ } else {
+ this.firstLogIndex = commitIndex + 1;
+ }
+ this.logIndexOffsetList.clear();
+ recoverMetaFile();
+ meta = new LogManagerMeta();
+ createNewLogFile(logDir, firstLogIndex);
+ logger.info("{}, clean all logs success, the new firstLogIndex={}",
this, firstLogIndex);
+ } catch (IOException e) {
+ logger.error("clear all logs failed,", e);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ private void deleteMetaFile() {
+ lock.writeLock().lock();
+ try {
+ File tmpMetaFile = SystemFileFactory.INSTANCE.getFile(logDir +
"logMeta.tmp");
+ if (tmpMetaFile.exists()) {
+ Files.delete(tmpMetaFile.toPath());
+ }
Review comment:
Can be simplified with `Files.deleteIfExists`
##########
File path:
cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
##########
@@ -945,23 +1008,34 @@ private int comparePersistLogFileName(File file1, File
file2) {
if (endIndex - startIndex > maxNumberOfLogsPerFetchOnDisk) {
newEndIndex = startIndex + maxNumberOfLogsPerFetchOnDisk;
}
- logger.debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
- endIndex, startIndex, newEndIndex);
+ logger
+ .debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
+ endIndex, startIndex, newEndIndex);
- List<Pair<File, Pair<Long, Long>>> logDataFileAndOffsetList =
getLogDataFileAndOffset(
- startIndex, newEndIndex);
- if (logDataFileAndOffsetList.isEmpty()) {
- return Collections.emptyList();
- }
+ // maybe the logs will be delete during checkDeletePersistRaftLog or
clearAllLogs,
+ // use writeLock for two reasons:
+ // 1.if the log file to read is the last log file, we need to get write
lock to flush logBuffer,
+ // 2.avoid log files be deleted
+ lock.writeLock().lock();
Review comment:
Is the read lock of this lock used anywhere? If not, it may be
better to consider using a more lightweight lock.
##########
File path:
cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
##########
@@ -945,23 +1008,34 @@ private int comparePersistLogFileName(File file1, File
file2) {
if (endIndex - startIndex > maxNumberOfLogsPerFetchOnDisk) {
newEndIndex = startIndex + maxNumberOfLogsPerFetchOnDisk;
}
- logger.debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
- endIndex, startIndex, newEndIndex);
+ logger
+ .debug("intend to get logs between[{}, {}], actually get logs
between[{},{}]", startIndex,
+ endIndex, startIndex, newEndIndex);
- List<Pair<File, Pair<Long, Long>>> logDataFileAndOffsetList =
getLogDataFileAndOffset(
- startIndex, newEndIndex);
- if (logDataFileAndOffsetList.isEmpty()) {
- return Collections.emptyList();
- }
+ // maybe the logs will be delete during checkDeletePersistRaftLog or
clearAllLogs,
+ // use writeLock for two reasons:
+ // 1.if the log file to read is the last log file, we need to get write
lock to flush logBuffer,
+ // 2.avoid log files be deleted
Review comment:
"being deleted"
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]