fatemehp commented on code in PR #14142:
URL: https://github.com/apache/arrow/pull/14142#discussion_r975762682


##########
cpp/src/parquet/column_reader.cc:
##########
@@ -1328,6 +1329,156 @@ class TypedRecordReader : public ColumnReaderImplBase<DType>,
 
     return records_read;
   }
+  
+  // Skip records that we have in our buffer. This function is only for
+  // non-repeated fields.
+  int64_t SkipRecordsInBufferNonRepeated(int64_t num_records) {
+    ARROW_DCHECK(this->max_rep_level_ == 0);
+    ARROW_DCHECK(this->has_values_to_process());
+
+    int64_t remaining_records = levels_written_ - levels_position_;
+    int64_t skipped_records = std::min(num_records, remaining_records);
+    int64_t start_levels_position = levels_position_;
+    // Since there is no repetition, number of levels equals number of records.
+    levels_position_ += skipped_records;
+    // We skipped the levels by incrementing 'levels_position_'. For values
+    // we do not have a buffer, so we need to read them and throw them away.
+    // First we need to figure out how many present/not-null values there are.
+    std::shared_ptr<::arrow::ResizableBuffer> valid_bits;
+    valid_bits = AllocateBuffer(this->pool_);
+    PARQUET_THROW_NOT_OK(
+        valid_bits->Resize(bit_util::BytesForBits(skipped_records), true));
+    ValidityBitmapInputOutput validity_io;
+    validity_io.values_read_upper_bound = skipped_records;
+    validity_io.valid_bits = valid_bits->mutable_data();
+    validity_io.valid_bits_offset = 0;
+    DefLevelsToBitmap(def_levels() + start_levels_position,
+                      levels_position_ - start_levels_position,
+                      this->leaf_info_, &validity_io);
+    int64_t values_to_read = validity_io.values_read - validity_io.null_count;
+    ReadAndThrowAway(values_to_read);
+    // Mark the levels as read in the underlying column reader.
+    this->ConsumeBufferedValues(skipped_records);
+    return skipped_records;
+  }
+
+  // Skip records for repeated fields. Returns number of skipped records.
+  int64_t SkipRecordsRepeated(int64_t num_records) {
+    ARROW_DCHECK_GT(this->max_rep_level_, 0);
+
+    // For repeated fields, we are technically reading and throwing away the
+    // levels and values since we do not know the record boundaries in advance.
+    // Keep filling the buffer and skipping until we reach the desired number
+    // of records or we run out of values in the column chunk.
+    int64_t skipped_records = 0;
+    int64_t level_batch_size = std::max<int>(kMinLevelBatchSize, num_records);
+    // If 'at_record_start_' is false, but (skipped_records == num_records), it
+    // means that for the last record that was counted, we have not seen all
+    // of its values yet.
+    while (!at_record_start_ || skipped_records < num_records) {
+      // Is there more data to read in this row group?
+      // HasNextInternal() will advance to the next page if necessary.
+      if (!this->HasNextInternal()) {
+        if (!at_record_start_) {
+          // We ended the row group while inside a record that we haven't seen
+          // the end of yet. So increment the record count for the last record
+          // in the row group
+          ++skipped_records;
+          at_record_start_ = true;
+        }
+        break;
+      }
+
+      // Read some more levels.
+      int64_t batch_size =
+          std::min(level_batch_size, available_values_current_page());
+      // No more data in column. This must be an empty page.
+      // If we had exhausted the last page, HasNextInternal() must have advanced
+      // to the next page. So there must be available values to process.
+      if (batch_size == 0) {
+        break;
+      }
+
+      ReserveLevels(batch_size);
+
+      int16_t* def_levels = this->def_levels() + levels_written_;
+      int16_t* rep_levels = this->rep_levels() + levels_written_;
+
+      int64_t levels_read = 0;
+      levels_read = this->ReadDefinitionLevels(batch_size, def_levels);
+      if (this->ReadRepetitionLevels(batch_size, rep_levels) != levels_read) {
+        throw ParquetException(
+            "Number of decoded rep / def levels did not match");
+      }
+
+      levels_written_ += levels_read;
+
+      // Look at the buffered levels, delimit them based on
+      // (rep_level == 0), report back how many records are in there, and
+      // fill in how many not-null values (def_level == max_def_level_).
+      // DelimitRecords updates levels_position_.
+      int64_t remaining_records = num_records - skipped_records;
+      int64_t start_levels_position = levels_position_;
+      int64_t values_seen = 0;
+      skipped_records += DelimitRecords(remaining_records, &values_seen);
+      this->ConsumeBufferedValues(levels_position_ - start_levels_position);
+      ReadAndThrowAway(values_seen);
+    }
+
+    return skipped_records;
+  }
+
+  // Read 'num_values' values and throw them away.
+  int64_t ReadAndThrowAway(int64_t num_values) {
+    int64_t values_left = num_values;
+    int64_t batch_size = 1024;  // ReadBatch with a smaller memory footprint
+    int64_t values_read = 0;
+
+    // This will be enough scratch space to accommodate 16-bit levels or any
+    // value type
+    int value_size = type_traits<DType::type_num>::value_byte_size;
+    std::shared_ptr<ResizableBuffer> scratch = AllocateBuffer(

Review Comment:
   I think it should work since the value for the variable length types is 
technically a length and a pointer. I will add a separate test for them.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to