jixuan1989 commented on a change in pull request #32: fix sonar issues
URL: https://github.com/apache/incubator-iotdb/pull/32#discussion_r251665742
##########
File path:
iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
##########
@@ -1481,224 +1428,154 @@ private String
queryAndWriteDataForMerge(IntervalFileNode backupIntervalFile)
continue;
}
for (Path path : pathList) {
- // query one measurenment in the special deviceId
+ // query one measurement in the special deviceId
String measurementId = path.getMeasurement();
TSDataType dataType = mManager.getSeriesType(path.getFullPath());
OverflowSeriesDataSource overflowSeriesDataSource =
overflowProcessor.queryMerge(deviceId,
- measurementId, dataType,true);
+ measurementId, dataType, true);
Filter timeFilter = FilterFactory
- .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
- TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
+
.and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
+
TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path,
timeFilter);
IReader seriesReader = SeriesReaderFactory.getInstance()
- .createSeriesReaderForMerge(backupIntervalFile,
- overflowSeriesDataSource, seriesFilter);
- try {
- if (!seriesReader.hasNext()) {
- LOGGER.debug(
- "The time-series {} has no data with the filter {} in the
filenode processor {}",
- path, seriesFilter, getProcessorName());
- } else {
- numOfChunk++;
- TimeValuePair timeValuePair = seriesReader.next();
- if (fileIoWriter == null) {
- baseDir = directories.getNextFolderForTsfile();
- fileName = String.valueOf(timeValuePair.getTimestamp()
- + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR +
System.currentTimeMillis());
- outputPath = constructOutputFilePath(baseDir,
getProcessorName(), fileName);
- fileName = getProcessorName() + File.separatorChar + fileName;
- fileIoWriter = new TsFileIOWriter(new File(outputPath));
- }
- if (!isRowGroupHasData) {
- // start a new rowGroupMetadata
- isRowGroupHasData = true;
- // the datasize and numOfChunk is fake
- // the accurate datasize and numOfChunk will get after write all
this device data.
- fileIoWriter.startFlushChunkGroup(deviceId);// TODO please check
me.
- startPos = fileIoWriter.getPos();
- }
- // init the serieswWriteImpl
- MeasurementSchema measurementSchema =
fileSchema.getMeasurementSchema(measurementId);
- ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
- int pageSizeThreshold = TsFileConf.pageSizeInByte;
- ChunkWriterImpl seriesWriterImpl = new
ChunkWriterImpl(measurementSchema, pageWriter,
- pageSizeThreshold);
- // write the series data
- recordCount += writeOneSeries(deviceId, measurementId,
seriesWriterImpl, dataType,
- seriesReader,
- startTimeMap, endTimeMap, timeValuePair);
- // flush the series data
- seriesWriterImpl.writeToFileWriter(fileIoWriter);
- }
- } finally {
- seriesReader.close();
- }
+ .createSeriesReaderForMerge(backupIntervalFile,
+ overflowSeriesDataSource, seriesFilter);
+ numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter,
dataType,
+ startTimeMap, endTimeMap);
}
- if (isRowGroupHasData) {
+ if (mergeIsRowGroupHasData) {
Review comment:
We will replace rowGroup with chunkGroup.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services