lidavidm commented on code in PR #14151:
URL: https://github.com/apache/arrow/pull/14151#discussion_r995890826
##########
java/dataset/src/main/java/org/apache/arrow/dataset/file/JniWrapper.java:
##########
@@ -45,4 +45,20 @@ private JniWrapper() {
*/
public native long makeFileSystemDatasetFactory(String uri, int fileFormat);
+ /**
+ * Write all record batches in a {@link NativeRecordBatchIterator} into files. This internally
+ * depends on C++ write API: FileSystemDataset::Write.
+ *
+ * @param schema_address the schema address
+ * @param fileFormat target file format (ID)
+ * @param uri target file uri
+ * @param partitionColumns columns used to partition output files
+ * @param maxPartitions maximum partitions to be included in written files
+ * @param baseNameTemplate file name template used to make partitions. E.g. "dat_{i}", i is current partition
+ *                         ID around all written files.
+ */
+ public native void writeFromScannerToFile(long stream_address, long schema_address,
Review Comment:
ArrowArrayStream already carries the schema, so passing a separate schema_address here shouldn't be necessary.
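
For context, a minimal sketch of why the extra parameter looks redundant: the C Stream Interface exposes the schema through the stream itself (`ArrowArrayStream::get_schema`), so a consumer needs only the stream. The round-trip below goes through `org.apache.arrow.c.Data`; the helper class and method names are illustrative, not part of the PR.

```java
import org.apache.arrow.c.ArrowArrayStream;
import org.apache.arrow.c.Data;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.vector.ipc.ArrowReader;
import org.apache.arrow.vector.types.pojo.Schema;

public final class StreamSchemaDemo {
  private StreamSchemaDemo() {}

  /** Exports {@code source} as a C stream, then reads the schema back from the stream alone. */
  public static Schema schemaFromStream(BufferAllocator allocator, ArrowReader source) throws Exception {
    try (ArrowArrayStream stream = ArrowArrayStream.allocateNew(allocator)) {
      // Export the reader through the C Stream Interface.
      Data.exportArrayStream(allocator, source, stream);
      // The importer only needs the stream: the schema comes from the stream's
      // get_schema callback, with no separate ArrowSchema export.
      try (ArrowReader imported = Data.importArrayStream(allocator, stream)) {
        return imported.getVectorSchemaRoot().getSchema();
      }
    }
  }
}
```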
##########
java/dataset/src/main/java/org/apache/arrow/dataset/file/DatasetFileWriter.java:
##########
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.arrow.dataset.file;
+
+import org.apache.arrow.c.ArrowArrayStream;
+import org.apache.arrow.c.ArrowSchema;
+import org.apache.arrow.c.Data;
+import org.apache.arrow.dataset.scanner.ScanTask;
+import org.apache.arrow.dataset.scanner.Scanner;
+import org.apache.arrow.memory.BufferAllocator;
+import org.apache.arrow.util.AutoCloseables;
+import org.apache.arrow.vector.ipc.ArrowReader;
+import org.apache.arrow.vector.util.SchemaUtility;
+
+import java.util.Iterator;
+
+/**
+ * JNI-based utility to write datasets into files. It internally depends on C++ static method
+ * FileSystemDataset::Write.
+ */
+public class DatasetFileWriter {
+
+ /**
+ * Scan over an input {@link Scanner} then write all record batches to file.
+ *
+ * @param scanner the source scanner for writing
+ * @param format target file format
+ * @param uri target file uri
+ * @param maxPartitions maximum partitions to be included in written files
+ * @param partitionColumns columns used to partition output files. Empty to disable partitioning
+ * @param baseNameTemplate file name template used to make partitions. E.g. "dat_{i}", i is current partition
+ *                         ID around all written files.
+ */
+ public static void write(BufferAllocator allocator, Scanner scanner, FileFormat format, String uri,
+                          String[] partitionColumns, int maxPartitions, String baseNameTemplate) {
+ ArrowSchema arrowSchema = ArrowSchema.allocateNew(allocator);
+ Data.exportSchema(allocator, scanner.schema(), null, arrowSchema);
+ RuntimeException throwableWrapper = null;
+ try {
+ Iterator<? extends ScanTask> taskIterators = scanner.scan().iterator();
+ while (taskIterators.hasNext()) {
+ ArrowReader currentReader = taskIterators.next().execute();
+ ArrowArrayStream stream = ArrowArrayStream.allocateNew(allocator);
+ Data.exportArrayStream(allocator, currentReader, stream);
+ JniWrapper.get().writeFromScannerToFile(stream.memoryAddress(), arrowSchema.memoryAddress(),
+     format.id(), uri, partitionColumns, maxPartitions, baseNameTemplate);
+ currentReader.close();
+ stream.close();
+ }
Review Comment:
I don't think this is right. We should implement ArrowReader over Scanner,
then write the entire dataset in one go. And then this method can be used to
write _any_ ArrowReader, not just Scanners.
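
To make the suggestion concrete, here is a rough sketch of such a wrapper. `ScannerReader` is a hypothetical name, and copying batches via `VectorUnloader`/`VectorLoader` is just one possible approach (dictionaries and byte accounting are ignored); it is not the PR's actual implementation.

```java
import java.io.IOException;
import java.util.Iterator;

import org.apache.arrow.dataset.scanner.ScanTask;
import org.apache.arrow.dataset.scanner.Scanner;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.vector.VectorLoader;
import org.apache.arrow.vector.VectorUnloader;
import org.apache.arrow.vector.ipc.ArrowReader;
import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;
import org.apache.arrow.vector.types.pojo.Schema;

/** Hypothetical adapter exposing a Scanner as a single ArrowReader. */
class ScannerReader extends ArrowReader {
  private final Scanner scanner;
  private final Iterator<? extends ScanTask> tasks;
  private ArrowReader currentTaskReader;

  ScannerReader(BufferAllocator allocator, Scanner scanner) {
    super(allocator);
    this.scanner = scanner;
    this.tasks = scanner.scan().iterator();
  }

  @Override
  public boolean loadNextBatch() throws IOException {
    while (true) {
      if (currentTaskReader == null) {
        if (!tasks.hasNext()) {
          return false;                        // no more scan tasks: end of stream
        }
        currentTaskReader = tasks.next().execute();
      }
      if (currentTaskReader.loadNextBatch()) {
        // Copy the task's current batch into this reader's root.
        VectorUnloader unloader = new VectorUnloader(currentTaskReader.getVectorSchemaRoot());
        try (ArrowRecordBatch batch = unloader.getRecordBatch()) {
          new VectorLoader(getVectorSchemaRoot()).load(batch);
        }
        return true;
      }
      currentTaskReader.close();               // task exhausted, move on to the next one
      currentTaskReader = null;
    }
  }

  @Override
  public long bytesRead() {
    return 0;                                  // not tracked in this sketch
  }

  @Override
  protected void closeReadSource() throws IOException {
    if (currentTaskReader != null) {
      currentTaskReader.close();
    }
    // Scanner lifecycle is left to the caller in this sketch.
  }

  @Override
  protected Schema readSchema() {
    return scanner.schema();
  }
}
```

With something along these lines, the writer could export one ArrowArrayStream for the whole dataset and accept any ArrowReader, not just a Scanner.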