lidavidm commented on a change in pull request #138:
URL: https://github.com/apache/arrow-cookbook/pull/138#discussion_r811211116



##########
File path: java/source/dataset.rst
##########
@@ -0,0 +1,297 @@
+.. _arrow-dataset:
+
+=======
+Dataset
+=======
+
+* `Arrow Java Dataset`_: Java implementation of the Arrow Dataset library. It implements the Dataset Java API via JNI bindings to the C++ implementation.
+
+.. contents::
+
+Constructing Datasets
+=====================
+
+We can construct a dataset with an auto-inferred schema.
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import java.util.stream.StreamSupport;
+
+    try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+        String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+        try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+            try(Dataset dataset = datasetFactory.finish()){
+                ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+                try(Scanner scanner = dataset.newScan(options)){
+                    System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    1
+
+Let's construct our dataset with a predefined schema.
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import java.util.stream.StreamSupport;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+        try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+            try(Dataset dataset = datasetFactory.finish(datasetFactory.inspect())){
+                ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+                try(Scanner scanner = dataset.newScan(options)){
+                    System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    1
+
+Getting the Schema
+==================
+
+During Dataset Construction
+***************************
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.types.pojo.Schema;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+        try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+            Schema schema = datasetFactory.inspect();
+
+            System.out.println(schema);
+        }
+    }
+
+.. testoutput::
+
+    Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+From a Dataset
+**************
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.types.pojo.Schema;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+        try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+            ScanOptions options = new ScanOptions(/*batchSize*/ 1);
+            try(Dataset dataset = datasetFactory.finish()){
+                try(Scanner scanner = dataset.newScan(options)){
+                    Schema schema = scanner.schema();
+
+                    System.out.println(schema);
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+Query Parquet File
+==================
+
+Let's query information from a Parquet file.
+
+Query Data Content For File
+***************************
+
+.. testcode::
+
+    import com.google.common.collect.Streams;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.VectorLoader;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;
+
+    import java.util.stream.Stream;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE);
+        DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri);
+        Dataset dataset = datasetFactory.finish()){
+        ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+        try(Scanner scanner = dataset.newScan(options);
+            VectorSchemaRoot vsr = VectorSchemaRoot.create(scanner.schema(), rootAllocator)){
+            scanner.scan().forEach(scanTask-> {
+                Stream<ArrowRecordBatch> stream = Streams.stream(scanTask.execute());
+                VectorLoader loader = new VectorLoader(vsr);
+                stream.forEach(arrowRecordBatch -> {
+                    loader.load(arrowRecordBatch);
+                    System.out.print(vsr.contentToTSVString());
+                    arrowRecordBatch.close();
+                });
+            });
+        }
+    }
+
+.. testoutput::
+
+    id    name
+    1    David
+    2    Gladis
+    3    Juan
+
+Query Data Content For Directory
+********************************
+
+Consider that we have these files: data1 with 3 rows, data2 with 3 rows, and data3 with 250 rows.
+
+.. testcode::
+
+    import com.google.common.collect.Streams;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.util.AutoCloseables;
+    import org.apache.arrow.vector.FieldVector;
+    import org.apache.arrow.vector.VectorLoader;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;
+    import org.apache.arrow.vector.types.pojo.Schema;
+
+    import java.util.List;
+    import java.util.stream.Collectors;
+    import java.util.stream.Stream;
+    import java.util.stream.StreamSupport;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE);
+        DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri);
+        Dataset dataset = datasetFactory.finish()){
+        ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+        try(Scanner scanner = dataset.newScan(options);
+            VectorSchemaRoot vsr = VectorSchemaRoot.create(scanner.schema(), rootAllocator)){
+            scanner.scan().forEach(scanTask-> {
+                Stream<ArrowRecordBatch> stream = Streams.stream(scanTask.execute());

Review comment:
       Just wondering, why can't we use a foreach loop instead of using streams in these recipes? Streams are a bit incidental/cluttered here.
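
       For example, something along these lines (just a rough, untested sketch: it assumes scanTask.execute() hands back an Iterator<ArrowRecordBatch>, which is what the Streams.stream(scanTask.execute()) call above already relies on, and it would need imports for java.util.Iterator and ScanTask in addition to the ones already listed):

           VectorLoader loader = new VectorLoader(vsr);
           for (ScanTask scanTask : scanner.scan()) {
               // iterate the record batches produced by this scan task
               Iterator<ArrowRecordBatch> batches = scanTask.execute();
               while (batches.hasNext()) {
                   try (ArrowRecordBatch batch = batches.next()) {
                       loader.load(batch);
                       System.out.print(vsr.contentToTSVString());
                   }
               }
           }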

##########
File path: java/source/dataset.rst
##########
@@ -0,0 +1,297 @@
+.. _arrow-dataset:
+
+=======
+Dataset
+=======
+
+* `Arrow Java Dataset`_: Java implementation of the Arrow Dataset library. It implements the Dataset Java API via JNI bindings to the C++ implementation.
+
+.. contents::
+
+Constructing Datasets
+=====================
+
+We can construct a dataset with an auto-inferred schema.
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import java.util.stream.StreamSupport;
+
+    try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+        String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+        try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+            try(Dataset dataset = datasetFactory.finish()){
+                ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+                try(Scanner scanner = dataset.newScan(options)){
+                    System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    1
+
+Let's construct our dataset with a predefined schema.
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import java.util.stream.StreamSupport;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+        try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+            try(Dataset dataset = datasetFactory.finish(datasetFactory.inspect())){
+                ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+                try(Scanner scanner = dataset.newScan(options)){
+                    System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    1
+
+Getting the Schema
+==================
+
+During Dataset Construction
+***************************
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.types.pojo.Schema;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+        try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+            Schema schema = datasetFactory.inspect();
+
+            System.out.println(schema);
+        }
+    }
+
+.. testoutput::
+
+    Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+From a Dataset
+**************
+
+.. testcode::
+
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.types.pojo.Schema;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+        try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+            ScanOptions options = new ScanOptions(/*batchSize*/ 1);
+            try(Dataset dataset = datasetFactory.finish()){
+                try(Scanner scanner = dataset.newScan(options)){
+                    Schema schema = scanner.schema();
+
+                    System.out.println(schema);
+                }
+            }
+        }
+    }
+
+.. testoutput::
+
+    Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+Query Parquet File
+==================
+
+Let's query information from a Parquet file.
+
+Query Data Content For File
+***************************
+
+.. testcode::
+
+    import com.google.common.collect.Streams;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.VectorLoader;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;
+
+    import java.util.stream.Stream;
+
+    String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+    try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE);
+        DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri);
+        Dataset dataset = datasetFactory.finish()){
+        ScanOptions options = new ScanOptions(/*batchSize*/ 100);
+        try(Scanner scanner = dataset.newScan(options);
+            VectorSchemaRoot vsr = VectorSchemaRoot.create(scanner.schema(), rootAllocator)){
+            scanner.scan().forEach(scanTask-> {

Review comment:
       Filed https://issues.apache.org/jira/browse/ARROW-15745




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

