This is an automated email from the ASF dual-hosted git repository.
leirui pushed a commit to branch research/M4-visualization
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/research/M4-visualization by
this push:
new 565f94701d uncompress save time
565f94701d is described below
commit 565f94701db091876e65c08269dfa309fb74bd97
Author: Lei Rui <[email protected]>
AuthorDate: Thu Apr 13 16:58:32 2023 +0800
uncompress save time
---
.../iotdb/tsfile/common/conf/TSFileConfig.java | 2 +-
.../tsfile/read/reader/chunk/ChunkReader.java | 63 ++++++++++++----------
2 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
index 2186a92162..b7ba5a57d5 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
@@ -111,7 +111,7 @@ public class TSFileConfig implements Serializable {
/** Default DFT satisfy rate is 0.1 */
private double dftSatisfyRate = 0.1;
/** Data compression method, TsFile supports UNCOMPRESSED, SNAPPY or LZ4. */
- private CompressionType compressor = CompressionType.SNAPPY;
+ private CompressionType compressor = CompressionType.UNCOMPRESSED;
/** Line count threshold for checking page memory occupied size. */
private int pageCheckSizeThreshold = 100;
/** Default endian value is BIG_ENDIAN. */
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java
index 2a4e1f7e4c..c794f87f78 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java
@@ -25,6 +25,7 @@ import org.apache.iotdb.tsfile.encoding.decoder.Decoder;
import org.apache.iotdb.tsfile.file.MetaMarker;
import org.apache.iotdb.tsfile.file.header.ChunkHeader;
import org.apache.iotdb.tsfile.file.header.PageHeader;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
@@ -137,37 +138,45 @@ public class ChunkReader implements IChunkReader {
}
private PageReader constructPageReaderForNextPage(PageHeader pageHeader)
throws IOException {
- int compressedPageBodyLength = pageHeader.getCompressedSize();
- byte[] compressedPageBody = new byte[compressedPageBodyLength];
+ ByteBuffer pageData;
+ if (chunkHeader.getCompressionType() != CompressionType.UNCOMPRESSED) {
+ int compressedPageBodyLength = pageHeader.getCompressedSize();
+ byte[] compressedPageBody = new byte[compressedPageBodyLength];
+
+ // doesn't has a complete page body
+ if (compressedPageBodyLength > chunkDataBuffer.remaining()) {
+ throw new IOException(
+ "do not has a complete page body. Expected:"
+ + compressedPageBodyLength
+ + ". Actual:"
+ + chunkDataBuffer.remaining());
+ }
- // doesn't has a complete page body
- if (compressedPageBodyLength > chunkDataBuffer.remaining()) {
- throw new IOException(
- "do not has a complete page body. Expected:"
- + compressedPageBodyLength
- + ". Actual:"
- + chunkDataBuffer.remaining());
- }
+ chunkDataBuffer.get(compressedPageBody);
+ byte[] uncompressedPageData = new byte[pageHeader.getUncompressedSize()];
+ try {
+ unCompressor.uncompress(
+ compressedPageBody, 0, compressedPageBodyLength, uncompressedPageData, 0);
+ } catch (Exception e) {
+ throw new IOException(
+ "Uncompress error! uncompress size: "
+ + pageHeader.getUncompressedSize()
+ + "compressed size: "
+ + pageHeader.getCompressedSize()
+ + "page header: "
+ + pageHeader
+ + e.getMessage());
+ }
- chunkDataBuffer.get(compressedPageBody);
+ pageData = ByteBuffer.wrap(uncompressedPageData);
+ } else {
+ // get a slice from chunkDataBuffer, and position chunkDataBuffer
+ pageData = chunkDataBuffer.slice();
+ pageData.limit(pageHeader.getCompressedSize());
+ chunkDataBuffer.position(chunkDataBuffer.position() + pageHeader.getCompressedSize());
+ }
Decoder valueDecoder =
Decoder.getDecoderByType(chunkHeader.getEncodingType(), chunkHeader.getDataType());
- byte[] uncompressedPageData = new byte[pageHeader.getUncompressedSize()];
- try {
- unCompressor.uncompress(
- compressedPageBody, 0, compressedPageBodyLength, uncompressedPageData, 0);
- } catch (Exception e) {
- throw new IOException(
- "Uncompress error! uncompress size: "
- + pageHeader.getUncompressedSize()
- + "compressed size: "
- + pageHeader.getCompressedSize()
- + "page header: "
- + pageHeader
- + e.getMessage());
- }
-
- ByteBuffer pageData = ByteBuffer.wrap(uncompressedPageData);
PageReader reader =
new PageReader(
pageHeader, pageData, chunkHeader.getDataType(), valueDecoder, timeDecoder, filter);