coderex2522 commented on a change in pull request #1067:
URL: https://github.com/apache/orc/pull/1067#discussion_r829636559



##########
File path: c++/src/io/OutputStream.cc
##########
@@ -96,6 +96,9 @@ namespace orc {
     dataBuffer->resize(0);
     return dataSize;
   }
+  void BufferedOutputStream::suppress() {

Review comment:
       done.

##########
File path: c++/src/ByteRLE.cc
##########
@@ -203,6 +205,17 @@ namespace orc {
     recorder->add(static_cast<uint64_t>(numLiterals));
   }
 
+  void ByteRleEncoderImpl::suppress() {
+    // written data can be just ignored because they are only flushed in memory
+    outputStream->suppress();
+
+    numLiterals = 0;

Review comment:
      I didn't find an existing reset function, so I added this one as part of this change.

##########
File path: c++/test/TestReader.cc
##########
@@ -672,4 +672,71 @@ namespace orc {
     EXPECT_EQ(1, nestedUnionBatch.offsets.data()[1]);
   }
 
+  TEST(TestReadIntent, testSuppressPresentStream) {
+    MemoryOutputStream memStream(DEFAULT_MEM_STREAM_SIZE);
+    MemoryPool* pool = getDefaultPool();
+    size_t rowCount = 2000;
+    {
+      auto type = std::unique_ptr<Type>(
+        Type::buildTypeFromString("struct<col1:int,col2:int>"));
+      WriterOptions options;
+      options.setStripeSize(1024 * 1024)
+          .setCompressionBlockSize(1024)
+          .setCompression(CompressionKind_NONE)
+          .setMemoryPool(pool)
+          .setRowIndexStride(1000);
+
+      auto writer = createWriter(*type, &memStream, options);
+      auto batch = writer->createRowBatch(rowCount);
+      auto& structBatch = dynamic_cast<StructVectorBatch&>(*batch);
+      auto& longBatch1 = 
dynamic_cast<LongVectorBatch&>(*structBatch.fields[0]);
+      auto& longBatch2 = 
dynamic_cast<LongVectorBatch&>(*structBatch.fields[1]);
+      structBatch.numElements = rowCount;
+      longBatch1.numElements = rowCount;
+      for (size_t i = 0; i < rowCount; ++i) {
+        if (i % 2 == 0) {
+          longBatch1.notNull[i] = false;
+        } else {
+          longBatch1.notNull[i] = true;
+          longBatch1.data[i] = static_cast<int64_t>(i*100);
+        }
+        longBatch2.data[i] = static_cast<int64_t>(i*300);
+      }
+      writer->add(*batch);
+      writer->close();
+    }
+    // read file & check the present stream
+    {
+      std::unique_ptr<InputStream> inStream(
+        new MemoryInputStream(memStream.getData(), memStream.getLength()));
+      ReaderOptions readerOptions;
+      readerOptions.setMemoryPool(*pool);
+      std::unique_ptr<Reader> reader =
+        createReader(std::move(inStream), readerOptions);
+      EXPECT_EQ(rowCount, reader->getNumberOfRows());
+      std::unique_ptr<RowReader> rowReader =
+        reader->createRowReader(RowReaderOptions());
+      auto batch = rowReader->createRowBatch(2048);
+      EXPECT_TRUE(rowReader->next(*batch));
+      EXPECT_EQ(rowCount, batch->numElements);

Review comment:
       Sure, based on your suggestion, I added more coverage to this test.

##########
File path: c++/test/TestReader.cc
##########
@@ -672,4 +672,71 @@ namespace orc {
     EXPECT_EQ(1, nestedUnionBatch.offsets.data()[1]);
   }
 
+  TEST(TestReadIntent, testSuppressPresentStream) {
+    MemoryOutputStream memStream(DEFAULT_MEM_STREAM_SIZE);
+    MemoryPool* pool = getDefaultPool();
+    size_t rowCount = 2000;
+    {
+      auto type = std::unique_ptr<Type>(
+        Type::buildTypeFromString("struct<col1:int,col2:int>"));
+      WriterOptions options;
+      options.setStripeSize(1024 * 1024)
+          .setCompressionBlockSize(1024)
+          .setCompression(CompressionKind_NONE)
+          .setMemoryPool(pool)
+          .setRowIndexStride(1000);
+
+      auto writer = createWriter(*type, &memStream, options);
+      auto batch = writer->createRowBatch(rowCount);
+      auto& structBatch = dynamic_cast<StructVectorBatch&>(*batch);
+      auto& longBatch1 = 
dynamic_cast<LongVectorBatch&>(*structBatch.fields[0]);
+      auto& longBatch2 = 
dynamic_cast<LongVectorBatch&>(*structBatch.fields[1]);
+      structBatch.numElements = rowCount;
+      longBatch1.numElements = rowCount;
+      for (size_t i = 0; i < rowCount; ++i) {
+        if (i % 2 == 0) {
+          longBatch1.notNull[i] = false;
+        } else {
+          longBatch1.notNull[i] = true;
+          longBatch1.data[i] = static_cast<int64_t>(i*100);
+        }
+        longBatch2.data[i] = static_cast<int64_t>(i*300);
+      }
+      writer->add(*batch);
+      writer->close();
+    }
+    // read file & check the present stream
+    {
+      std::unique_ptr<InputStream> inStream(
+        new MemoryInputStream(memStream.getData(), memStream.getLength()));
+      ReaderOptions readerOptions;
+      readerOptions.setMemoryPool(*pool);
+      std::unique_ptr<Reader> reader =
+        createReader(std::move(inStream), readerOptions);
+      EXPECT_EQ(rowCount, reader->getNumberOfRows());
+      std::unique_ptr<RowReader> rowReader =
+        reader->createRowReader(RowReaderOptions());
+      auto batch = rowReader->createRowBatch(2048);
+      EXPECT_TRUE(rowReader->next(*batch));
+      EXPECT_EQ(rowCount, batch->numElements);
+      // fetch StripeFooter from pb stream
+      std::unique_ptr<StripeInformation> stripeInfo = reader->getStripe(0);
+      ReaderImpl* readerImpl = dynamic_cast<ReaderImpl*>(reader.get());
+      std::unique_ptr<SeekableInputStream> pbStream(
+        new SeekableFileInputStream(readerImpl->getStream(),
+        stripeInfo->getOffset() + stripeInfo->getIndexLength() + 
stripeInfo->getDataLength(),
+        stripeInfo->getFooterLength(),
+        *pool));
+      proto::StripeFooter stripeFooter;
+      if (!stripeFooter.ParseFromZeroCopyStream(pbStream.get())) {
+        throw ParseError("Parse stripe footer from pb stream failed");
+      }
+      for (int i = 0; i < stripeFooter.streams_size(); ++i) {
+        const proto::Stream& stream = stripeFooter.streams(i);
+        if (stream.has_kind() && stream.kind() == proto::Stream_Kind_PRESENT) {
+          EXPECT_EQ(stream.column(), 1UL);
+        }
+      }
+    }

Review comment:
       done.

##########
File path: c++/test/TestReader.cc
##########
@@ -672,4 +672,71 @@ namespace orc {
     EXPECT_EQ(1, nestedUnionBatch.offsets.data()[1]);
   }
 
+  TEST(TestReadIntent, testSuppressPresentStream) {

Review comment:
      I think this feature is mainly a modification on the writer side, so I 
moved the test into TestWriter.cc.

##########
File path: c++/src/ColumnWriter.cc
##########
@@ -139,10 +140,22 @@ namespace orc {
                          uint64_t offset,
                          uint64_t numValues,
                          const char* incomingMask) {
-    notNullEncoder->add(batch.notNull.data() + offset, numValues, 
incomingMask);
+    const char* notNull = batch.notNull.data() + offset;
+    notNullEncoder->add(notNull, numValues, incomingMask);
+    for (uint64_t i = 0; i < numValues; ++i) {

Review comment:
       Done.

##########
File path: c++/src/ColumnWriter.cc
##########
@@ -139,10 +140,22 @@ namespace orc {
                          uint64_t offset,
                          uint64_t numValues,
                          const char* incomingMask) {
-    notNullEncoder->add(batch.notNull.data() + offset, numValues, 
incomingMask);
+    const char* notNull = batch.notNull.data() + offset;
+    notNullEncoder->add(notNull, numValues, incomingMask);
+    for (uint64_t i = 0; i < numValues; ++i) {
+      if (!notNull[i]) {
+        hasNullValue = true;

Review comment:
      This logic has been changed to OR the result with batch.hasNulls.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: dev-unsubscribe@orc.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


Reply via email to