This is an automated email from the ASF dual-hosted git repository.

sunzesong pushed a commit to branch tsfile_v4
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/tsfile_v4 by this push:
     new 76139f38cd Fix hive-connector module
76139f38cd is described below

commit 76139f38cd1fb9a57be5779bad3225fd86978a7f
Author: Zesong Sun <[email protected]>
AuthorDate: Fri Jun 10 08:35:52 2022 +0800

    Fix hive-connector module
---
 .../org/apache/iotdb/hive/TSFHiveRecordWriter.java |  4 +-
 .../iotdb/spark/tsfile/io/TsFileRecordWriter.java  |  8 ++--
 .../apache/iotdb/spark/tsfile/DefaultSource.scala  |  8 ++--
 .../iotdb/spark/tsfile/NarrowConverter.scala       |  7 +--
 .../apache/iotdb/spark/tsfile/WideConverter.scala  |  7 +--
 .../apache/iotdb/spark/tsfile/ConverterTest.scala  | 27 ++++++++----
 .../apache/iotdb/spark/tsfile/HDFSInputTest.java   | 36 ++++++++++-----
 .../org/apache/iotdb/spark/tsfile/TSFileSuit.scala | 51 +++++++++++++---------
 8 files changed, 93 insertions(+), 55 deletions(-)

diff --git 
a/hive-connector/src/main/java/org/apache/iotdb/hive/TSFHiveRecordWriter.java 
b/hive-connector/src/main/java/org/apache/iotdb/hive/TSFHiveRecordWriter.java
index f261448452..bead80b8ad 100644
--- 
a/hive-connector/src/main/java/org/apache/iotdb/hive/TSFHiveRecordWriter.java
+++ 
b/hive-connector/src/main/java/org/apache/iotdb/hive/TSFHiveRecordWriter.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.hive;
 import org.apache.iotdb.hadoop.fileSystem.HDFSOutput;
 import org.apache.iotdb.hadoop.tsfile.TSFRecordWriter;
 import org.apache.iotdb.hadoop.tsfile.record.HDFSTSRecord;
+import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.write.TsFileWriter;
 import org.apache.iotdb.tsfile.write.schema.Schema;
@@ -47,7 +48,8 @@ public class TSFHiveRecordWriter implements 
FileSinkOperator.RecordWriter {
   public TSFHiveRecordWriter(JobConf job, Path path, Schema schema) throws 
IOException {
 
     HDFSOutput hdfsOutput = new HDFSOutput(path.toString(), job, false);
-    writer = new TsFileWriter(hdfsOutput, schema);
+    HDFSOutput indexFileOutput = new HDFSOutput(path + 
TsFileConstant.INDEX_SUFFIX, job, false);
+    writer = new TsFileWriter(hdfsOutput, indexFileOutput, schema);
   }
 
   @Override
diff --git 
a/spark-tsfile/src/main/java/org/apache/iotdb/spark/tsfile/io/TsFileRecordWriter.java
 
b/spark-tsfile/src/main/java/org/apache/iotdb/spark/tsfile/io/TsFileRecordWriter.java
index 889e7b5ae5..b42725cacb 100644
--- 
a/spark-tsfile/src/main/java/org/apache/iotdb/spark/tsfile/io/TsFileRecordWriter.java
+++ 
b/spark-tsfile/src/main/java/org/apache/iotdb/spark/tsfile/io/TsFileRecordWriter.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.spark.tsfile.io;
 
 import org.apache.iotdb.hadoop.fileSystem.HDFSOutput;
+import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.write.TsFileWriter;
 import org.apache.iotdb.tsfile.write.record.TSRecord;
@@ -33,13 +34,14 @@ import java.io.IOException;
 
 public class TsFileRecordWriter extends RecordWriter<NullWritable, TSRecord> {
 
-  private TsFileWriter tsFileWriter = null;
+  private TsFileWriter tsFileWriter;
 
   public TsFileRecordWriter(TaskAttemptContext job, Path file, Schema schema) 
throws IOException {
     HDFSOutput hdfsOutput =
         new HDFSOutput(file.toString(), job.getConfiguration(), false); // 
NOTE overwrite false here
-
-    tsFileWriter = new TsFileWriter(hdfsOutput, schema);
+    HDFSOutput indexFileOutput =
+        new HDFSOutput(file + TsFileConstant.INDEX_SUFFIX, 
job.getConfiguration(), false);
+    tsFileWriter = new TsFileWriter(hdfsOutput, indexFileOutput, schema);
   }
 
   @Override
diff --git 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/DefaultSource.scala 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/DefaultSource.scala
index 8f581ac5d3..c1ebcf50af 100755
--- 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/DefaultSource.scala
+++ 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/DefaultSource.scala
@@ -28,7 +28,7 @@ import org.apache.hadoop.mapreduce.Job
 import org.apache.iotdb.hadoop.fileSystem.HDFSInput
 import org.apache.iotdb.spark.tsfile.DefaultSource.SerializableConfiguration
 import org.apache.iotdb.spark.tsfile.qp.Executor
-import org.apache.iotdb.tsfile.common.constant.QueryConstant
+import org.apache.iotdb.tsfile.common.constant.{QueryConstant, TsFileConstant}
 import org.apache.iotdb.tsfile.read.common.Field
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet
 import org.apache.iotdb.tsfile.read.{TsFileReader, TsFileSequenceReader}
@@ -102,9 +102,9 @@ private[tsfile] class DefaultSource extends FileFormat with 
DataSourceRegister {
       log.info(file.toString())
 
       val conf = broadcastedConf.value.value
-      val in = new HDFSInput(new Path(new URI(file.filePath)), conf)
-
-      val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+      val tsfileInput = new HDFSInput(new Path(new URI(file.filePath)), conf)
+      val indexFileInput = new HDFSInput(new Path(new URI(file.filePath + 
TsFileConstant.INDEX_SUFFIX)), conf)
+      val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
 
       val tsFileMetaData = reader.readFileMetadata
 
diff --git 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/NarrowConverter.scala
 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/NarrowConverter.scala
index 2af440b934..60785ab347 100644
--- 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/NarrowConverter.scala
+++ 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/NarrowConverter.scala
@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.FileStatus
 import org.apache.iotdb.hadoop.fileSystem.HDFSInput
 import org.apache.iotdb.spark.tsfile.qp.QueryProcessor
-import org.apache.iotdb.tsfile.common.constant.QueryConstant
+import org.apache.iotdb.tsfile.common.constant.{QueryConstant, TsFileConstant}
 import org.apache.iotdb.tsfile.file.metadata.enums.{TSDataType, TSEncoding}
 import org.apache.iotdb.spark.tsfile.qp.common.{BasicOperator, FilterOperator, 
SQLConstant, TSQueryPlan}
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata
@@ -68,8 +68,9 @@ object NarrowConverter extends Converter {
     var seriesSet: mutable.Set[String] = mutable.Set()
 
     files.foreach(f => {
-      val in = new HDFSInput(f.getPath, conf)
-      val reader = new TsFileSequenceReader(in)
+      val tsfileInput = new HDFSInput(f.getPath, conf)
+      val indexFileInput = new HDFSInput(f.getPath + 
TsFileConstant.INDEX_SUFFIX, conf)
+      val reader = new TsFileSequenceReader(tsfileInput, indexFileInput)
       val measurements = reader.getAllMeasurements
 
       measurements.foreach(m => {
diff --git 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/WideConverter.scala 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/WideConverter.scala
index 945ff17d5e..604b2c07ef 100755
--- 
a/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/WideConverter.scala
+++ 
b/spark-tsfile/src/main/scala/org/apache/iotdb/spark/tsfile/WideConverter.scala
@@ -24,7 +24,7 @@ import java.util
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.FileStatus
 import org.apache.iotdb.hadoop.fileSystem.HDFSInput
-import org.apache.iotdb.tsfile.common.constant.QueryConstant
+import org.apache.iotdb.tsfile.common.constant.{QueryConstant, TsFileConstant}
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata
 import org.apache.iotdb.tsfile.file.metadata.enums.{TSDataType, TSEncoding}
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader
@@ -90,8 +90,9 @@ object WideConverter extends Converter {
     var seriesSet: mutable.Set[String] = mutable.Set()
 
     files.foreach(f => {
-      val in = new HDFSInput(f.getPath, conf)
-      val reader = new TsFileSequenceReader(in)
+      val tsfileInput = new HDFSInput(f.getPath, conf)
+      val indexFileInput = new HDFSInput(f.getPath + 
TsFileConstant.INDEX_SUFFIX, conf)
+      val reader = new TsFileSequenceReader(tsfileInput, indexFileInput)
       val devices = reader.getAllDevices
       val measurements = reader.getAllMeasurements
 
diff --git 
a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/ConverterTest.scala 
b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/ConverterTest.scala
index cfde2437a0..e5b199811f 100644
--- 
a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/ConverterTest.scala
+++ 
b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/ConverterTest.scala
@@ -44,7 +44,9 @@ import org.scalatest.{BeforeAndAfterAll, FunSuite}
 class ConverterTest extends FunSuite with BeforeAndAfterAll {
   private val tsfileFolder = 
TestConstant.BASE_OUTPUT_PATH.concat("ConverterTest")
   private val tsfilePath1: String = tsfileFolder + "/test_1.tsfile"
+  private val tsfileIndexPath1: String = tsfileFolder + "/test_1.tsfile.index"
   private val tsfilePath2: String = tsfileFolder + "/test_2.tsfile"
+  private val tsfileIndexPath2: String = tsfileFolder + "/test_2.tsfile.index"
   private var spark: SparkSession = _
   private var conf: Configuration = _
 
@@ -85,8 +87,9 @@ class ConverterTest extends FunSuite with BeforeAndAfterAll {
   }
 
   test("getSeries") {
-    val in = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndexPath1)), 
conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
 
     val series = WideConverter.getSeries(tsFileMetaData, reader)
@@ -99,7 +102,8 @@ class ConverterTest extends FunSuite with BeforeAndAfterAll {
     Assert.assertEquals("[device_2.sensor_1,FLOAT]", series.get(4).toString)
     Assert.assertEquals("[device_2.sensor_2,INT32]", series.get(5).toString)
 
-    in.close()
+    tsfileInput.close()
+    indexFileInput.close()
   }
 
   test("getUnionSeries") {
@@ -170,8 +174,9 @@ class ConverterTest extends FunSuite with BeforeAndAfterAll 
{
   }
 
   test("prep4requiredSchema1") {
-    val in = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndexPath1)), 
conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
 
     val requiredFields: util.ArrayList[StructField] = new 
util.ArrayList[StructField]()
@@ -189,12 +194,15 @@ class ConverterTest extends FunSuite with 
BeforeAndAfterAll {
     Assert.assertEquals("StructField(device_1.sensor_1,FloatType,true)", 
fields(1).toString)
     Assert.assertEquals("StructField(device_1.sensor_2,IntegerType,true)", 
fields(2).toString)
 
-    in.close()
+    tsfileInput.close()
+    indexFileInput.close()
   }
 
   test("prepSchema") {
-    val in = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfilePath1)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndexPath1)), 
conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
+
     val tsFileMetaData = reader.readFileMetadata
 
     val requiredFields: util.ArrayList[StructField] = new 
util.ArrayList[StructField]()
@@ -212,7 +220,8 @@ class ConverterTest extends FunSuite with BeforeAndAfterAll 
{
     Assert.assertEquals("StructField(device_2.sensor_1,FloatType,true)", 
fields(4).toString)
     Assert.assertEquals("StructField(device_2.sensor_2,IntegerType,true)", 
fields(5).toString)
 
-    in.close()
+    tsfileInput.close()
+    indexFileInput.close()
   }
 
   test("toTsRecord") {
diff --git 
a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/HDFSInputTest.java 
b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/HDFSInputTest.java
index 67538a41fa..820a088c50 100644
--- 
a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/HDFSInputTest.java
+++ 
b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/HDFSInputTest.java
@@ -18,23 +18,26 @@
  */
 package org.apache.iotdb.spark.tsfile;
 
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
+import org.apache.iotdb.hadoop.fileSystem.HDFSInput;
 import org.apache.iotdb.spark.constant.TestConstant;
 import org.apache.iotdb.spark.tool.TsFileWriteTool;
-import org.apache.iotdb.hadoop.fileSystem.HDFSInput;
+import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
 public class HDFSInputTest {
 
   private String folder = 
TestConstant.BASE_OUTPUT_PATH.concat("test-output/HDFSInputTest");
   private String path = folder + "/test.tsfile";
-  private HDFSInput in;
+  private HDFSInput tsfileInput;
+  private HDFSInput indexFileInput;
 
   @Before
   public void before() throws Exception {
@@ -45,12 +48,13 @@ public class HDFSInputTest {
     tsfile_folder.mkdirs();
     TsFileWriteTool tsFileWrite = new TsFileWriteTool();
     tsFileWrite.create1(path);
-    in = new HDFSInput(path);
+    tsfileInput = new HDFSInput(path);
+    indexFileInput = new HDFSInput(path + TsFileConstant.INDEX_SUFFIX);
   }
 
   @After
   public void after() throws IOException {
-    in.close();
+    tsfileInput.close();
     File tsfile_folder = new File(folder);
     deleteDir(tsfile_folder);
   }
@@ -66,16 +70,24 @@ public class HDFSInputTest {
 
   @Test
   public void test_read1() throws IOException {
-    int size = 500;
+    int size = 300;
     ByteBuffer buffer = ByteBuffer.allocate(size);
-    Assert.assertEquals(size, in.read(buffer));
+    Assert.assertEquals(size, tsfileInput.read(buffer));
+
+    int indexSize = 300;
+    buffer = ByteBuffer.allocate(indexSize);
+    Assert.assertEquals(indexSize, indexFileInput.read(buffer));
   }
 
   @Test
   public void test_read2() throws IOException {
-    int size = 500;
+    int size = 200;
     long pos = 20L;
     ByteBuffer buffer = ByteBuffer.allocate(size);
-    Assert.assertEquals(size, in.read(buffer, pos));
+    Assert.assertEquals(size, tsfileInput.read(buffer, pos));
+
+    int indexSize = 200;
+    buffer = ByteBuffer.allocate(indexSize);
+    Assert.assertEquals(indexSize, indexFileInput.read(buffer, pos));
   }
 }
diff --git 
a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/TSFileSuit.scala 
b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/TSFileSuit.scala
index 53bf186a67..99965490a2 100644
--- a/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/TSFileSuit.scala
+++ b/spark-tsfile/src/test/scala/org/apache/iotdb/spark/tsfile/TSFileSuit.scala
@@ -48,6 +48,7 @@ class TSFileSuit extends FunSuite with BeforeAndAfterAll {
 
   private val tsfileFolder3 = baseFolder + "/tsfileFolder3"
   private val tsfile4 = tsfileFolder3 + "/test.tsfile"
+  private val tsfileIndex4 = tsfileFolder3 + "/test.tsfile.index"
 
   private val outputPath = baseFolder + "/output"
   private val outputPathFile = outputPath + "/part-m-00000"
@@ -340,8 +341,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
    */
   test("partition test: narrow table, global time filter, small partition 
size") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(1).getOffsetOfChunkHeader
@@ -403,8 +405,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
    */
   test("partition test: narrow table, global time filter, larger partition 
size") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -453,8 +456,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, global time filter 1") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -476,8 +480,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, global time filter 2") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -500,8 +505,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, global time filter 3") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -523,8 +529,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: narrow table, value filter") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -546,8 +553,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, value filter 1") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -569,8 +577,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, value filter 2") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -593,8 +602,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, value filter 3") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader
@@ -617,8 +627,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
 
   test("partition test: wide table, value filter 4") {
     var conf: Configuration = spark.sparkContext.hadoopConfiguration
-    val in = new HDFSInput(new Path(new URI(tsfile4)), conf)
-    val reader: TsFileSequenceReader = new TsFileSequenceReader(in)
+    val tsfileInput = new HDFSInput(new Path(new URI(tsfile4)), conf)
+    val indexFileInput = new HDFSInput(new Path(new URI(tsfileIndex4)), conf)
+    val reader: TsFileSequenceReader = new TsFileSequenceReader(tsfileInput, 
indexFileInput)
     val tsFileMetaData = reader.readFileMetadata
     val chunkMetadataList = reader.getChunkMetadataList(new 
common.Path("device_1", "sensor_1"))
     val endOffsetOfChunkGroup = chunkMetadataList.get(2).getOffsetOfChunkHeader

Reply via email to the mailing list.