zhanglingzhe0820 commented on a change in pull request #1758:
URL: https://github.com/apache/iotdb/pull/1758#discussion_r508538951



##########
File path: server/src/main/java/org/apache/iotdb/db/engine/tsfilemanagement/utils/HotCompactionUtils.java
##########
@@ -91,130 +84,204 @@ private HotCompactionUtils() {
     return new Pair<>(newChunkMetadata, newChunk);
   }
 
-  private static long readUnseqChunk(String storageGroup,
-      Map<String, TsFileSequenceReader> tsFileSequenceReaderMap, String deviceId, long maxVersion,
-      String measurementId,
-      Map<Long, TimeValuePair> timeValuePairMap, List<TsFileResource> levelResources)
+  private static long readByDeserializeMerge(RateLimiter compactionReadRateLimiter,
+      Map<TsFileSequenceReader, List<ChunkMetadata>> readerChunkMetadataMap, long maxVersion,
+      Map<Long, TimeValuePair> timeValuePairMap)
       throws IOException {
-    for (TsFileResource levelResource : levelResources) {
-      TsFileSequenceReader reader = buildReaderFromTsFileResource(levelResource,
-          tsFileSequenceReaderMap,
-          storageGroup);
-      if (reader == null) {
-        continue;
-      }
-      List<ChunkMetadata> chunkMetadataList = reader
-          .getChunkMetadataList(new Path(deviceId, measurementId));
+    for (Entry<TsFileSequenceReader, List<ChunkMetadata>> entry : readerChunkMetadataMap
+        .entrySet()) {
+      TsFileSequenceReader reader = entry.getKey();
+      List<ChunkMetadata> chunkMetadataList = entry.getValue();
       for (ChunkMetadata chunkMetadata : chunkMetadataList) {
         maxVersion = Math.max(chunkMetadata.getVersion(), maxVersion);
         IChunkReader chunkReader = new ChunkReaderByTimestamp(
             reader.readMemChunk(chunkMetadata));
+        long chunkSize = 0;
         while (chunkReader.hasNextSatisfiedPage()) {
           IPointReader iPointReader = new BatchDataIterator(
               chunkReader.nextPageData());
           while (iPointReader.hasNextTimeValuePair()) {
             TimeValuePair timeValuePair = iPointReader.nextTimeValuePair();
+            chunkSize += timeValuePair.getSize();
             timeValuePairMap.put(timeValuePair.getTimestamp(), timeValuePair);
           }
         }
+        MergeManager
+            .mergeRateLimiterAcquire(compactionReadRateLimiter, chunkSize);
       }
     }
     return maxVersion;
   }
 
-  private static void fillDeviceMeasurementMap(Set<String> devices,
-      Map<String, Map<String, MeasurementSchema>> deviceMeasurementMap,
-      List<TsFileResource> subLevelResources,
+  private static long writeByAppendMerge(long maxVersion, String device,
+      RateLimiter compactionWriteRateLimiter, RateLimiter 
compactionReadRateLimiter,
+      Map<TsFileSequenceReader, List<ChunkMetadata>> readerChunkMetadatasMap,
+      TsFileResource targetResource, RestorableTsFileIOWriter writer) throws 
IOException {
+    Pair<ChunkMetadata, Chunk> chunkPair = 
readByAppendMerge(compactionReadRateLimiter,
+        readerChunkMetadatasMap);
+    ChunkMetadata newChunkMetadata = chunkPair.left;
+    Chunk newChunk = chunkPair.right;
+    if (newChunkMetadata != null && newChunk != null) {
+      maxVersion = Math.max(newChunkMetadata.getVersion(), maxVersion);
+      // wait for limit write
+      MergeManager.mergeRateLimiterAcquire(compactionWriteRateLimiter,
+          newChunk.getHeader().getDataSize() + newChunk.getData().position());

Review comment:
       accept
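
For context on the throttling this diff introduces: MergeManager.mergeRateLimiterAcquire charges each chunk's byte count (chunkSize on the read path, header data size plus buffer position on the write path) against a shared limiter, so compaction I/O stays under a configured throughput. Below is a minimal sketch of that byte-budget pattern, assuming Guava's RateLimiter; the helper name acquireBytes and the 16 MB/s figure are illustrative only, not IoTDB's actual MergeManager code.

    import com.google.common.util.concurrent.RateLimiter;

    public final class ThrottleSketch {

      // Acquire "bytes" permits from the limiter, splitting large requests
      // because RateLimiter.acquire(int) only accepts an int permit count.
      static void acquireBytes(RateLimiter limiter, long bytes) {
        while (bytes >= Integer.MAX_VALUE) {
          limiter.acquire(Integer.MAX_VALUE);
          bytes -= Integer.MAX_VALUE;
        }
        if (bytes > 0) {
          limiter.acquire((int) bytes);
        }
      }

      public static void main(String[] args) {
        // One permit per byte; cap compaction I/O at roughly 16 MB/s (illustrative).
        RateLimiter readLimiter = RateLimiter.create(16 * 1024 * 1024);
        // Blocks until 4 MB of budget is available, smoothing bursts from large chunks.
        acquireBytes(readLimiter, 4L * 1024 * 1024);
      }
    }

Calling the limiter once per chunk (as the diff does) rather than once per point keeps the throttling overhead negligible while still bounding sustained read and write bandwidth.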



