westonpace commented on a change in pull request #11616:
URL: https://github.com/apache/arrow/pull/11616#discussion_r785171485
##########
File path: cpp/src/arrow/ipc/read_write_test.cc
##########
@@ -2715,6 +2727,133 @@ TEST(TestRecordBatchFileReaderIo, ReadTwoContinousFieldsWithIoMerged) {
GetReadRecordBatchReadRanges(64, {0, 1}, {8 + 64 * 4});
}
+constexpr static int kNumBatches = 10;
+// It can be difficult to know the exact size of the schema. Instead we just make the
+// row data big enough that we can easily identify if a read is for a schema or for
+// row data.
+//
+// This needs to be large enough to space record batches kDefaultHoleSizeLimit bytes apart
+// and also large enough that record batch data is more than kMaxMetadataSizeBytes bytes
+constexpr static int kRowsPerBatch = 1000;
+constexpr static int64_t kMaxMetadataSizeBytes = 1 << 13;
+// There are always 2 reads when the file is opened
+constexpr static int kNumReadsOnOpen = 2;
Review comment:
Ended up filing it as a follow-up: ARROW-15340
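
For anyone following along, here is a minimal standalone sketch (not part of the PR; the bytes-per-row figure is an assumption for illustration, not the actual test schema) of the size arithmetic behind these constants: a batch of kRowsPerBatch rows comfortably exceeds kMaxMetadataSizeBytes, so a read can be classified as schema/metadata vs. row data purely by its size.

```cpp
// Sketch only: verifies that kRowsPerBatch * (assumed bytes per row) exceeds
// kMaxMetadataSizeBytes. kAssumedBytesPerRow is a hypothetical value, e.g.
// two int64 columns; the real test may use a different schema.
#include <cstdint>
#include <iostream>

constexpr int kRowsPerBatch = 1000;
constexpr int64_t kMaxMetadataSizeBytes = 1 << 13;  // 8192 bytes

int main() {
  constexpr int64_t kAssumedBytesPerRow = 16;  // assumption for illustration
  constexpr int64_t batch_body_bytes = kRowsPerBatch * kAssumedBytesPerRow;  // 16000
  std::cout << "batch body ~" << batch_body_bytes << " bytes, metadata limit "
            << kMaxMetadataSizeBytes << " bytes\n";
  // Since 16000 > 8192, any read larger than kMaxMetadataSizeBytes must be
  // row data rather than schema/metadata under these assumptions.
  return 0;
}
```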