MyXOF commented on a change in pull request #32: fix sonar issues
URL: https://github.com/apache/incubator-iotdb/pull/32#discussion_r251708127
##########
File path:
iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
##########
@@ -1481,224 +1428,154 @@ private String
queryAndWriteDataForMerge(IntervalFileNode backupIntervalFile)
continue;
}
for (Path path : pathList) {
- // query one measurenment in the special deviceId
+ // query one measurement in the special deviceId
String measurementId = path.getMeasurement();
TSDataType dataType = mManager.getSeriesType(path.getFullPath());
OverflowSeriesDataSource overflowSeriesDataSource =
overflowProcessor.queryMerge(deviceId,
- measurementId, dataType,true);
+ measurementId, dataType, true);
Filter timeFilter = FilterFactory
- .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
- TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
+
.and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
+
TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path,
timeFilter);
IReader seriesReader = SeriesReaderFactory.getInstance()
- .createSeriesReaderForMerge(backupIntervalFile,
- overflowSeriesDataSource, seriesFilter);
- try {
- if (!seriesReader.hasNext()) {
- LOGGER.debug(
- "The time-series {} has no data with the filter {} in the
filenode processor {}",
- path, seriesFilter, getProcessorName());
- } else {
- numOfChunk++;
- TimeValuePair timeValuePair = seriesReader.next();
- if (fileIoWriter == null) {
- baseDir = directories.getNextFolderForTsfile();
- fileName = String.valueOf(timeValuePair.getTimestamp()
- + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR +
System.currentTimeMillis());
- outputPath = constructOutputFilePath(baseDir,
getProcessorName(), fileName);
- fileName = getProcessorName() + File.separatorChar + fileName;
- fileIoWriter = new TsFileIOWriter(new File(outputPath));
- }
- if (!isRowGroupHasData) {
- // start a new rowGroupMetadata
- isRowGroupHasData = true;
- // the datasize and numOfChunk is fake
- // the accurate datasize and numOfChunk will get after write all
this device data.
- fileIoWriter.startFlushChunkGroup(deviceId);// TODO please check
me.
- startPos = fileIoWriter.getPos();
- }
- // init the serieswWriteImpl
- MeasurementSchema measurementSchema =
fileSchema.getMeasurementSchema(measurementId);
- ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
- int pageSizeThreshold = TsFileConf.pageSizeInByte;
- ChunkWriterImpl seriesWriterImpl = new
ChunkWriterImpl(measurementSchema, pageWriter,
- pageSizeThreshold);
- // write the series data
- recordCount += writeOneSeries(deviceId, measurementId,
seriesWriterImpl, dataType,
- seriesReader,
- startTimeMap, endTimeMap, timeValuePair);
- // flush the series data
- seriesWriterImpl.writeToFileWriter(fileIoWriter);
- }
- } finally {
- seriesReader.close();
- }
+ .createSeriesReaderForMerge(backupIntervalFile,
+ overflowSeriesDataSource, seriesFilter);
+ numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter,
dataType,
+ startTimeMap, endTimeMap);
}
- if (isRowGroupHasData) {
+ if (mergeIsChunkGroupHasData) {
// end the new rowGroupMetadata
- long size = fileIoWriter.getPos() - startPos;
+ long size = mergeFileWriter.getPos() - mergeStartPos;
footer = new ChunkGroupFooter(deviceId, size, numOfChunk);
- fileIoWriter.endChunkGroup(footer);
+ mergeFileWriter.endChunkGroup(footer);
}
}
- if (fileIoWriter != null) {
- fileIoWriter.endFile(fileSchema);
+ if (mergeFileWriter != null) {
+ mergeFileWriter.endFile(fileSchema);
}
-
backupIntervalFile.setBaseDirIndex(directories.getTsFileFolderIndex(baseDir));
- backupIntervalFile.setRelativePath(fileName);
- backupIntervalFile.overflowChangeType = OverflowChangeType.NO_CHANGE;
+
backupIntervalFile.setBaseDirIndex(directories.getTsFileFolderIndex(mergeBaseDir));
+ backupIntervalFile.setRelativePath(mergeFileName);
+ backupIntervalFile.setOverflowChangeType(OverflowChangeType.NO_CHANGE);
backupIntervalFile.setStartTimeMap(startTimeMap);
backupIntervalFile.setEndTimeMap(endTimeMap);
- return fileName;
+ return mergeFileName;
+ }
+
+ private int queryAndWriteSeries(IReader seriesReader, Path path,
+ SingleSeriesExpression seriesFilter,
TSDataType dataType,
+ Map<String, Long> startTimeMap, Map<String,
Long> endTimeMap)
+ throws IOException {
+ int numOfChunk = 0;
+ try {
+ if (!seriesReader.hasNext()) {
+ LOGGER.debug(
+ "The time-series {} has no data with the filter {} in the
filenode processor {}",
+ path, seriesFilter, getProcessorName());
+ } else {
+ numOfChunk++;
+ TimeValuePair timeValuePair = seriesReader.next();
+ if (mergeFileWriter == null) {
+ mergeBaseDir = directories.getNextFolderForTsfile();
+ mergeFileName = timeValuePair.getTimestamp()
+ + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR +
System.currentTimeMillis();
+ mergeOutputPath = constructOutputFilePath(mergeBaseDir,
getProcessorName(),
+ mergeFileName);
+ mergeFileName = getProcessorName() + File.separatorChar +
mergeFileName;
+ mergeFileWriter = new TsFileIOWriter(new File(mergeOutputPath));
+ }
+ if (!mergeIsChunkGroupHasData) {
+ // start a new rowGroupMetadata
+ mergeIsChunkGroupHasData = true;
+ // the datasize and numOfChunk is fake
+ // the accurate datasize and numOfChunk will get after write all
this device data.
+ mergeFileWriter.startFlushChunkGroup(path.getDevice());// TODO
please check me.
+ mergeStartPos = mergeFileWriter.getPos();
+ }
+ // init the serieswWriteImpl
+ MeasurementSchema measurementSchema = fileSchema
+ .getMeasurementSchema(path.getMeasurement());
+ ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
+ int pageSizeThreshold = TSFileConfig.pageSizeInByte;
+ ChunkWriterImpl seriesWriterImpl = new
ChunkWriterImpl(measurementSchema, pageWriter,
+ pageSizeThreshold);
+ // write the series data
+ writeOneSeries(path.getDevice(), seriesWriterImpl, dataType,
+ seriesReader,
+ startTimeMap, endTimeMap, timeValuePair);
+ // flush the series data
+ seriesWriterImpl.writeToFileWriter(mergeFileWriter);
+ }
+ } finally {
+ seriesReader.close();
+ }
+ return numOfChunk;
+ }
+
+
+ private void writeOneSeries(String deviceId, ChunkWriterImpl
seriesWriterImpl,
+ TSDataType dataType, IReader seriesReader,
+ Map<String, Long> startTimeMap,
+ Map<String, Long> endTimeMap,
+ TimeValuePair timeValuePair) throws IOException {
+ long startTime;
+ long endTime;
+ TimeValuePair localTV = timeValuePair;
+ writeTVPair(seriesWriterImpl, dataType, localTV);
+ startTime = endTime = localTV.getTimestamp();
+ if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId) >
startTime) {
+ startTimeMap.put(deviceId, startTime);
+ }
+ if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
+ endTimeMap.put(deviceId, endTime);
+ }
+ while (seriesReader.hasNext()) {
+ localTV = seriesReader.next();
+ endTime = localTV.getTimestamp();
+ writeTVPair(seriesWriterImpl, dataType, localTV);
+ }
+ if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
+ endTimeMap.put(deviceId, endTime);
+ }
}
- private int writeOneSeries(String deviceId, String measurement,
ChunkWriterImpl seriesWriterImpl,
- TSDataType dataType, IReader seriesReader, Map<String, Long>
startTimeMap,
- Map<String, Long> endTimeMap,
- TimeValuePair timeValuePair) throws IOException {
- int count = 0;
- long startTime = -1;
- long endTime = -1;
+ private void writeTVPair(ChunkWriterImpl seriesWriterImpl, TSDataType
dataType,
+ TimeValuePair timeValuePair) throws IOException {
switch (dataType) {
case BOOLEAN:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getBoolean());
- count++;
- startTime = endTime = timeValuePair.getTimestamp();
- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId)
> startTime) {
- startTimeMap.put(deviceId, startTime);
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
- while (seriesReader.hasNext()) {
- count++;
- timeValuePair = seriesReader.next();
- endTime = timeValuePair.getTimestamp();
- seriesWriterImpl
- .write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getBoolean());
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
break;
case INT32:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getInt());
- count++;
- startTime = endTime = timeValuePair.getTimestamp();
- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId)
> startTime) {
- startTimeMap.put(deviceId, startTime);
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
- while (seriesReader.hasNext()) {
- count++;
- timeValuePair = seriesReader.next();
- endTime = timeValuePair.getTimestamp();
- seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getInt());
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
break;
case INT64:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getLong());
- count++;
- startTime = endTime = timeValuePair.getTimestamp();
- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId)
> startTime) {
- startTimeMap.put(deviceId, startTime);
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
- while (seriesReader.hasNext()) {
- count++;
- timeValuePair = seriesReader.next();
- endTime = timeValuePair.getTimestamp();
- seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getLong());
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
break;
case FLOAT:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getFloat());
- count++;
- startTime = endTime = timeValuePair.getTimestamp();
- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId)
> startTime) {
- startTimeMap.put(deviceId, startTime);
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
- while (seriesReader.hasNext()) {
- count++;
- timeValuePair = seriesReader.next();
- endTime = timeValuePair.getTimestamp();
- seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getFloat());
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
break;
case DOUBLE:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getDouble());
- count++;
- startTime = endTime = timeValuePair.getTimestamp();
- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId)
> startTime) {
- startTimeMap.put(deviceId, startTime);
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
- while (seriesReader.hasNext()) {
- count++;
- timeValuePair = seriesReader.next();
- endTime = timeValuePair.getTimestamp();
- seriesWriterImpl
- .write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getDouble());
- }
- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) <
endTime) {
- endTimeMap.put(deviceId, endTime);
- }
break;
case TEXT:
seriesWriterImpl.write(timeValuePair.getTimestamp(),
timeValuePair.getValue().getBinary());
- count++;
Review comment:
这些下面的操作都不要了? (Translation: Are all of the operations below no longer needed?)
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services