Copilot commented on code in PR #16829:
URL: https://github.com/apache/iotdb/pull/16829#discussion_r2575987866
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/function/partition/Slice.java:
##########
@@ -186,6 +187,20 @@ public Object getObject(int columnIndex) {
return originalColumns[columnIndex].getObject(offset);
}
+ @Override
+ public Binary readObject(int columnIndex, long offset, int length) {
+ if (getDataType(columnIndex) == Type.OBJECT) {
Review Comment:
The condition is inverted. The method should throw an exception when the
column is NOT an object type, but currently it throws when it IS an object type.
Should be: `if (getDataType(columnIndex) != Type.OBJECT)`
```suggestion
if (getDataType(columnIndex) != Type.OBJECT) {
```
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/ObjectTypeUtils.java:
##########
@@ -40,6 +58,117 @@ public class ObjectTypeUtils {
private ObjectTypeUtils() {}
+ public static ByteBuffer readObjectContent(
+ Binary binary, long offset, int length, boolean mayNotInCurrentNode) {
+ Pair<Long, String> objectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
+ long fileLength = objectLengthPathPair.getLeft();
+ String relativePath = objectLengthPathPair.getRight();
+ int actualReadSize =
+ ObjectTypeUtils.getActualReadSize(
+ relativePath, fileLength, offset, length < 0 ? fileLength :
length);
+ return ObjectTypeUtils.readObjectContent(
+ relativePath, offset, actualReadSize, mayNotInCurrentNode);
+ }
+
+ public static ByteBuffer readObjectContent(
+ String relativePath, long offset, int readSize, boolean
mayNotInCurrentNode) {
+ Optional<File> objectFile =
TIER_MANAGER.getAbsoluteObjectFilePath(relativePath, false);
+ if (objectFile.isPresent()) {
+ return readObjectContentFromLocalFile(objectFile.get(), offset,
readSize);
+ }
+ if (mayNotInCurrentNode) {
+ return readObjectContentFromRemoteFile(relativePath, offset, readSize);
+ }
+ throw new ObjectFileNotExist(relativePath);
+ }
+
+ private static ByteBuffer readObjectContentFromLocalFile(File file, long
offset, long readSize) {
+ byte[] bytes = new byte[(int) readSize];
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ try (FileChannel fileChannel = FileChannel.open(file.toPath(),
StandardOpenOption.READ)) {
+ fileChannel.read(buffer, offset);
+ } catch (IOException e) {
+ throw new IoTDBRuntimeException(e,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
+ }
+ buffer.flip();
+ return buffer;
+ }
+
+ private static ByteBuffer readObjectContentFromRemoteFile(
+ final String relativePath, final long offset, final int readSize) {
+ ByteBuffer buffer = ByteBuffer.allocate(readSize);
+ TConsensusGroupId consensusGroupId =
+ new TConsensusGroupId(
+ TConsensusGroupType.DataRegion,
+ Integer.parseInt(Paths.get(relativePath).getName(0).toString()));
+ List<TRegionReplicaSet> regionReplicaSetList =
+ ClusterPartitionFetcher.getInstance()
+ .getRegionReplicaSet(Collections.singletonList(consensusGroupId));
+ TRegionReplicaSet regionReplicaSet =
regionReplicaSetList.iterator().next();
+ final int batchSize = 1024 * 1024;
+ final TReadObjectReq req = new TReadObjectReq();
+ req.setRelativePath(relativePath);
+ for (int i = 0; i < regionReplicaSet.getDataNodeLocations().size(); i++) {
+ TDataNodeLocation dataNodeLocation =
regionReplicaSet.getDataNodeLocations().get(i);
+ int toReadSizeInCurrentDataNode = readSize;
+ try (SyncDataNodeInternalServiceClient client =
+ Coordinator.getInstance()
+ .getInternalServiceClientManager()
+ .borrowClient(dataNodeLocation.getInternalEndPoint())) {
+ while (toReadSizeInCurrentDataNode > 0) {
+ req.setOffset(offset + buffer.position());
+ req.setSize(Math.min(toReadSizeInCurrentDataNode, batchSize));
+ toReadSizeInCurrentDataNode -= req.getSize();
+ ByteBuffer partial = client.readObject(req);
+ buffer.put(partial);
+ }
+ } catch (Exception e) {
+ logger.error("Failed to read object from datanode: {}",
dataNodeLocation, e);
+ if (i == regionReplicaSet.getDataNodeLocations().size() - 1) {
+ throw new IoTDBRuntimeException(e,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
+ }
+ buffer.clear();
+ req.setOffset(offset);
+ continue;
+ }
+ break;
+ }
+ buffer.flip();
+ return buffer;
+ }
+
+ public static int getActualReadSize(String filePath, long fileSize, long
offset, long length) {
+ if (offset >= fileSize) {
+ throw new SemanticException(
+ String.format(
+ "offset %d is greater than object size %d, file path is %s",
Review Comment:
The error message should say "offset %d is greater than or equal to object
size %d" since the condition checks `offset >= fileSize`, not just `offset >
fileSize`.
```suggestion
"offset %d is greater than or equal to object size %d, file
path is %s",
```
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/transformation/dag/column/unary/scalar/ReadObjectColumnTransformer.java:
##########
@@ -107,35 +100,14 @@ private void transform(Column column, ColumnBuilder
columnBuilder, int i) {
}
private Binary readObject(Binary binary) {
- File file = ObjectTypeUtils.getObjectPathFromBinary(binary);
- long actualReadSize = getActualReadSize(file);
+ Pair<Long, String> ObjectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
+ long fileLength = ObjectLengthPathPair.getLeft();
+ String relativePath = ObjectLengthPathPair.getRight();
Review Comment:
Variable name `ObjectLengthPathPair` should follow Java naming conventions
for local variables and start with a lowercase letter. Should be:
`objectLengthPathPair`
```suggestion
Pair<Long, String> objectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
long fileLength = objectLengthPathPair.getLeft();
String relativePath = objectLengthPathPair.getRight();
```
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/ObjectTypeUtils.java:
##########
@@ -40,6 +58,117 @@ public class ObjectTypeUtils {
private ObjectTypeUtils() {}
+ public static ByteBuffer readObjectContent(
+ Binary binary, long offset, int length, boolean mayNotInCurrentNode) {
+ Pair<Long, String> objectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
+ long fileLength = objectLengthPathPair.getLeft();
+ String relativePath = objectLengthPathPair.getRight();
+ int actualReadSize =
+ ObjectTypeUtils.getActualReadSize(
+ relativePath, fileLength, offset, length < 0 ? fileLength :
length);
+ return ObjectTypeUtils.readObjectContent(
+ relativePath, offset, actualReadSize, mayNotInCurrentNode);
+ }
+
+ public static ByteBuffer readObjectContent(
+ String relativePath, long offset, int readSize, boolean
mayNotInCurrentNode) {
+ Optional<File> objectFile =
TIER_MANAGER.getAbsoluteObjectFilePath(relativePath, false);
+ if (objectFile.isPresent()) {
+ return readObjectContentFromLocalFile(objectFile.get(), offset,
readSize);
+ }
+ if (mayNotInCurrentNode) {
+ return readObjectContentFromRemoteFile(relativePath, offset, readSize);
+ }
+ throw new ObjectFileNotExist(relativePath);
+ }
+
+ private static ByteBuffer readObjectContentFromLocalFile(File file, long
offset, long readSize) {
+ byte[] bytes = new byte[(int) readSize];
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ try (FileChannel fileChannel = FileChannel.open(file.toPath(),
StandardOpenOption.READ)) {
+ fileChannel.read(buffer, offset);
+ } catch (IOException e) {
+ throw new IoTDBRuntimeException(e,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
+ }
+ buffer.flip();
+ return buffer;
+ }
+
+ private static ByteBuffer readObjectContentFromRemoteFile(
+ final String relativePath, final long offset, final int readSize) {
+ ByteBuffer buffer = ByteBuffer.allocate(readSize);
+ TConsensusGroupId consensusGroupId =
+ new TConsensusGroupId(
+ TConsensusGroupType.DataRegion,
+ Integer.parseInt(Paths.get(relativePath).getName(0).toString()));
Review Comment:
    Potential uncaught `java.lang.NumberFormatException`: `Integer.parseInt` will
throw if the first segment of `relativePath` is not a valid integer. Consider
validating the path segment or catching the exception and rethrowing a more
descriptive error.
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/ObjectTypeUtils.java:
##########
@@ -40,6 +58,117 @@ public class ObjectTypeUtils {
private ObjectTypeUtils() {}
+ public static ByteBuffer readObjectContent(
+ Binary binary, long offset, int length, boolean mayNotInCurrentNode) {
+ Pair<Long, String> objectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
+ long fileLength = objectLengthPathPair.getLeft();
+ String relativePath = objectLengthPathPair.getRight();
+ int actualReadSize =
+ ObjectTypeUtils.getActualReadSize(
+ relativePath, fileLength, offset, length < 0 ? fileLength :
length);
+ return ObjectTypeUtils.readObjectContent(
+ relativePath, offset, actualReadSize, mayNotInCurrentNode);
+ }
+
+ public static ByteBuffer readObjectContent(
+ String relativePath, long offset, int readSize, boolean
mayNotInCurrentNode) {
+ Optional<File> objectFile =
TIER_MANAGER.getAbsoluteObjectFilePath(relativePath, false);
+ if (objectFile.isPresent()) {
+ return readObjectContentFromLocalFile(objectFile.get(), offset,
readSize);
+ }
+ if (mayNotInCurrentNode) {
+ return readObjectContentFromRemoteFile(relativePath, offset, readSize);
+ }
+ throw new ObjectFileNotExist(relativePath);
+ }
+
+ private static ByteBuffer readObjectContentFromLocalFile(File file, long
offset, long readSize) {
+ byte[] bytes = new byte[(int) readSize];
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ try (FileChannel fileChannel = FileChannel.open(file.toPath(),
StandardOpenOption.READ)) {
+ fileChannel.read(buffer, offset);
+ } catch (IOException e) {
+ throw new IoTDBRuntimeException(e,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
+ }
+ buffer.flip();
+ return buffer;
+ }
+
+ private static ByteBuffer readObjectContentFromRemoteFile(
+ final String relativePath, final long offset, final int readSize) {
+ ByteBuffer buffer = ByteBuffer.allocate(readSize);
+ TConsensusGroupId consensusGroupId =
+ new TConsensusGroupId(
+ TConsensusGroupType.DataRegion,
+ Integer.parseInt(Paths.get(relativePath).getName(0).toString()));
+ List<TRegionReplicaSet> regionReplicaSetList =
+ ClusterPartitionFetcher.getInstance()
+ .getRegionReplicaSet(Collections.singletonList(consensusGroupId));
+ TRegionReplicaSet regionReplicaSet =
regionReplicaSetList.iterator().next();
+ final int batchSize = 1024 * 1024;
+ final TReadObjectReq req = new TReadObjectReq();
+ req.setRelativePath(relativePath);
+ for (int i = 0; i < regionReplicaSet.getDataNodeLocations().size(); i++) {
+ TDataNodeLocation dataNodeLocation =
regionReplicaSet.getDataNodeLocations().get(i);
Review Comment:
    Potential `NullPointerException` if
`regionReplicaSet.getDataNodeLocations()` is null. Note that if the list is
merely empty, no exception is thrown — the loop body is simply skipped and an
empty buffer is returned silently, which may mask the error. The code should
check that the locations list is non-null and non-empty before iterating.
```suggestion
List<TDataNodeLocation> dataNodeLocations =
regionReplicaSet.getDataNodeLocations();
if (dataNodeLocations == null || dataNodeLocations.isEmpty()) {
throw new IoTDBRuntimeException(
"No DataNode locations found for regionReplicaSet: " +
regionReplicaSet,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
}
for (int i = 0; i < dataNodeLocations.size(); i++) {
TDataNodeLocation dataNodeLocation = dataNodeLocations.get(i);
```
##########
iotdb-core/confignode/src/test/resources/confignode1conf/iotdb-system.properties:
##########
@@ -34,7 +34,7 @@ timestamp_precision=ms
data_region_consensus_protocol_class=org.apache.iotdb.consensus.iot.IoTConsensus
schema_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus
schema_replication_factor=3
-data_replication_factor=3
+data_replication_factor=1
Review Comment:
Changing `data_replication_factor` from 3 to 1 in a test configuration file
may break tests that rely on multi-replica behavior. This change should be
reverted unless there's a specific reason for it. If this change is intentional
for testing purposes, it should be documented or done in a separate, focused PR.
```suggestion
data_replication_factor=3
```
##########
iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/object/ObjectTypeCompactionTest.java:
##########
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.storageengine.dataregion.compaction.object;
+
+import org.apache.iotdb.commons.exception.MetadataException;
+import org.apache.iotdb.commons.schema.table.TsTable;
+import org.apache.iotdb.commons.schema.table.column.FieldColumnSchema;
+import org.apache.iotdb.commons.schema.table.column.TagColumnSchema;
+import org.apache.iotdb.db.exception.DiskSpaceInsufficientException;
+import org.apache.iotdb.db.exception.StorageEngineException;
+import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.AbstractCompactionTest;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.FastCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadChunkCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CrossSpaceCompactionTask;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.InnerSpaceCompactionTask;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.SettleCompactionTask;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+import
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus;
+import org.apache.iotdb.db.storageengine.rescon.disk.TierManager;
+
+import org.apache.tsfile.enums.ColumnCategory;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.file.metadata.ColumnSchema;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.file.metadata.StringArrayDeviceID;
+import org.apache.tsfile.file.metadata.TableSchema;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.write.chunk.AlignedChunkWriterImpl;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.apache.tsfile.write.writer.TsFileIOWriter;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+
+public class ObjectTypeCompactionTest extends AbstractCompactionTest {
+
+ private static final TableSchema tableSchema =
+ new TableSchema(
+ "t1",
+ Arrays.asList(
+ new ColumnSchema("device", TSDataType.STRING,
ColumnCategory.TAG),
+ new ColumnSchema("s1", TSDataType.OBJECT,
ColumnCategory.FIELD)));
+
+ private String threadName;
+ private File objectDir;
+
+ @Before
+ public void setUp()
+ throws IOException, WriteProcessException, MetadataException,
InterruptedException {
+ this.threadName = Thread.currentThread().getName();
+ Thread.currentThread().setName("pool-1-IoTDB-Compaction-Worker-1");
+ DataNodeTableCache.getInstance().invalid(this.COMPACTION_TEST_SG);
+ createTable("t1", 1);
+ super.setUp();
+ try {
+ objectDir = new
File(TierManager.getInstance().getNextFolderForObjectFile());
+ } catch (DiskSpaceInsufficientException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @After
Review Comment:
    This method overrides `AbstractCompactionTest.tearDown`; it is advisable
to add an `@Override` annotation so the compiler verifies the override.
```suggestion
@After
@Override
```
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/TableMetadataImpl.java:
##########
@@ -265,13 +265,14 @@ && isIntegerNumber(argumentTypes.get(2)))) {
}
return STRING;
} else if
(TableBuiltinScalarFunction.LENGTH.getFunctionName().equalsIgnoreCase(functionName))
{
- if (!(argumentTypes.size() == 1 && isCharType(argumentTypes.get(0)))) {
+ if (!(argumentTypes.size() == 1 && (isCharType(argumentTypes.get(0)))
+ || isObjectType(argumentTypes.get(0)))) {
Review Comment:
The condition logic is incorrect. The parentheses are misplaced, causing
incorrect evaluation.
Current logic: `(argumentTypes.size() == 1 &&
isCharType(argumentTypes.get(0))) || isObjectType(argumentTypes.get(0))`
This evaluates to true when `isObjectType(argumentTypes.get(0))` is true
**regardless** of the size of argumentTypes, which could allow invalid calls
with multiple arguments.
Should be: `argumentTypes.size() == 1 && (isCharType(argumentTypes.get(0))
|| isObjectType(argumentTypes.get(0)))`
```suggestion
if (!(argumentTypes.size() == 1 && (isCharType(argumentTypes.get(0))
|| isObjectType(argumentTypes.get(0))))) {
```
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/ObjectTypeUtils.java:
##########
@@ -40,6 +58,117 @@ public class ObjectTypeUtils {
private ObjectTypeUtils() {}
+ public static ByteBuffer readObjectContent(
+ Binary binary, long offset, int length, boolean mayNotInCurrentNode) {
+ Pair<Long, String> objectLengthPathPair =
ObjectTypeUtils.parseObjectBinary(binary);
+ long fileLength = objectLengthPathPair.getLeft();
+ String relativePath = objectLengthPathPair.getRight();
+ int actualReadSize =
+ ObjectTypeUtils.getActualReadSize(
+ relativePath, fileLength, offset, length < 0 ? fileLength :
length);
+ return ObjectTypeUtils.readObjectContent(
+ relativePath, offset, actualReadSize, mayNotInCurrentNode);
+ }
+
+ public static ByteBuffer readObjectContent(
+ String relativePath, long offset, int readSize, boolean
mayNotInCurrentNode) {
+ Optional<File> objectFile =
TIER_MANAGER.getAbsoluteObjectFilePath(relativePath, false);
+ if (objectFile.isPresent()) {
+ return readObjectContentFromLocalFile(objectFile.get(), offset,
readSize);
+ }
+ if (mayNotInCurrentNode) {
+ return readObjectContentFromRemoteFile(relativePath, offset, readSize);
+ }
+ throw new ObjectFileNotExist(relativePath);
+ }
+
+ private static ByteBuffer readObjectContentFromLocalFile(File file, long
offset, long readSize) {
+ byte[] bytes = new byte[(int) readSize];
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ try (FileChannel fileChannel = FileChannel.open(file.toPath(),
StandardOpenOption.READ)) {
+ fileChannel.read(buffer, offset);
+ } catch (IOException e) {
+ throw new IoTDBRuntimeException(e,
TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
+ }
+ buffer.flip();
+ return buffer;
+ }
+
+ private static ByteBuffer readObjectContentFromRemoteFile(
+ final String relativePath, final long offset, final int readSize) {
+ ByteBuffer buffer = ByteBuffer.allocate(readSize);
+ TConsensusGroupId consensusGroupId =
+ new TConsensusGroupId(
+ TConsensusGroupType.DataRegion,
+ Integer.parseInt(Paths.get(relativePath).getName(0).toString()));
+ List<TRegionReplicaSet> regionReplicaSetList =
+ ClusterPartitionFetcher.getInstance()
+ .getRegionReplicaSet(Collections.singletonList(consensusGroupId));
Review Comment:
Potential `NoSuchElementException` if `regionReplicaSetList` is empty. The
code should check if the list is not empty before calling `iterator().next()`,
or handle the case when no replica set is found.
```suggestion
.getRegionReplicaSet(Collections.singletonList(consensusGroupId));
if (regionReplicaSetList.isEmpty()) {
throw new ObjectFileNotExist(relativePath);
}
```
##########
iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/object/ObjectTypeCompactionTest.java:
##########
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.storageengine.dataregion.compaction.object;
+
+import org.apache.iotdb.commons.exception.MetadataException;
+import org.apache.iotdb.commons.schema.table.TsTable;
+import org.apache.iotdb.commons.schema.table.column.FieldColumnSchema;
+import org.apache.iotdb.commons.schema.table.column.TagColumnSchema;
+import org.apache.iotdb.db.exception.DiskSpaceInsufficientException;
+import org.apache.iotdb.db.exception.StorageEngineException;
+import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.AbstractCompactionTest;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.FastCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadChunkCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CrossSpaceCompactionTask;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.InnerSpaceCompactionTask;
+import
org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.SettleCompactionTask;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+import
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus;
+import org.apache.iotdb.db.storageengine.rescon.disk.TierManager;
+
+import org.apache.tsfile.enums.ColumnCategory;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.file.metadata.ColumnSchema;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.file.metadata.StringArrayDeviceID;
+import org.apache.tsfile.file.metadata.TableSchema;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.write.chunk.AlignedChunkWriterImpl;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.apache.tsfile.write.writer.TsFileIOWriter;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+
+public class ObjectTypeCompactionTest extends AbstractCompactionTest {
+
+ private static final TableSchema tableSchema =
+ new TableSchema(
+ "t1",
+ Arrays.asList(
+ new ColumnSchema("device", TSDataType.STRING,
ColumnCategory.TAG),
+ new ColumnSchema("s1", TSDataType.OBJECT,
ColumnCategory.FIELD)));
+
+ private String threadName;
+ private File objectDir;
+
+ @Before
Review Comment:
    This method overrides `AbstractCompactionTest.setUp`; it is advisable to
add an `@Override` annotation so the compiler verifies the override.
```suggestion
@Before
@Override
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]