lidavidm commented on a change in pull request #138:
URL: https://github.com/apache/arrow-cookbook/pull/138#discussion_r810014264
########## File path: java/source/dataset.rst ##########
@@ -0,0 +1,300 @@
+.. _arrow-dataset:
+
+=======
+Dataset
+=======
+
+* `Arrow Java Dataset <https://arrow.apache.org/docs/dev/java/dataset.html>`_: Java implementation of Arrow Datasets library.
+
+.. contents::
+
+Dataset
+=======
+
+Let construct our dataset with auto-inferred schema.
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import java.util.stream.StreamSupport;
+
+   try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+       String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+       try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+           try(Dataset dataset = datasetFactory.finish()){
+               ScanOptions options = new ScanOptions(100);
+               try(Scanner scanner = dataset.newScan(options)){
+                   System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+               }
+           }
+       }
+   }
+
+.. testoutput::
+
+   1
+
+Let construct our dataset with predefined schema.
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import java.util.stream.StreamSupport;
+
+   String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+   try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+       try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+           try(Dataset dataset = datasetFactory.finish(datasetFactory.inspect())){
+               ScanOptions options = new ScanOptions(100);
+               try(Scanner scanner = dataset.newScan(options)){
+                   System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+               }
+           }
+       }
+   }
+
+.. testoutput::
+
+   1
+
+Getting the Schema of a Dataset

Review comment:
   I suppose. Could the recipes then be named something like "Getting Schema During Dataset Construction" and "Getting Schema from a Dataset"?
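A minimal sketch of the two schema paths those names would distinguish, using only calls that already appear in the hunks above (`DatasetFactory.inspect()` versus `Scanner.schema()`); the URI and `ScanOptions` value are reused from the diff, and the snippet is illustrative rather than code from the PR:

```java
import org.apache.arrow.dataset.file.FileFormat;
import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
import org.apache.arrow.dataset.jni.NativeMemoryPool;
import org.apache.arrow.dataset.scanner.ScanOptions;
import org.apache.arrow.dataset.scanner.Scanner;
import org.apache.arrow.dataset.source.Dataset;
import org.apache.arrow.dataset.source.DatasetFactory;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.types.pojo.Schema;

String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
try (RootAllocator allocator = new RootAllocator(Long.MAX_VALUE);
     DatasetFactory factory = new FileSystemDatasetFactory(
         allocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
    // "Getting Schema During Dataset Construction": the factory inspects
    // the file before any Dataset has been materialized.
    Schema constructionSchema = factory.inspect();

    // "Getting Schema from a Dataset": a scan over the finished Dataset
    // reports the schema it will produce.
    try (Dataset dataset = factory.finish(constructionSchema);
         Scanner scanner = dataset.newScan(new ScanOptions(100))) {
        Schema scanSchema = scanner.schema();
        System.out.println(constructionSchema.equals(scanSchema)); // expected: true
    }
}
```

Naming along those lines would make explicit that `inspect()` works on the factory before a `Dataset` exists, while `schema()` requires a scan over the constructed dataset.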
########## File path: java/source/dataset.rst ##########
@@ -0,0 +1,300 @@
+.. _arrow-dataset:
+
+=======
+Dataset
+=======
+
+* `Arrow Java Dataset <https://arrow.apache.org/docs/dev/java/dataset.html>`_: Java implementation of Arrow Datasets library.
+
+.. contents::
+
+Dataset
+=======
+
+Let construct our dataset with auto-inferred schema.
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import java.util.stream.StreamSupport;
+
+   try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+       String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+       try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+           try(Dataset dataset = datasetFactory.finish()){
+               ScanOptions options = new ScanOptions(100);
+               try(Scanner scanner = dataset.newScan(options)){
+                   System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+               }
+           }
+       }
+   }
+
+.. testoutput::
+
+   1
+
+Let construct our dataset with predefined schema.
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import java.util.stream.StreamSupport;
+
+   String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+   try (RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)) {
+       try (DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)) {
+           try(Dataset dataset = datasetFactory.finish(datasetFactory.inspect())){
+               ScanOptions options = new ScanOptions(100);
+               try(Scanner scanner = dataset.newScan(options)){
+                   System.out.println(StreamSupport.stream(scanner.scan().spliterator(), false).count());
+               }
+           }
+       }
+   }
+
+.. testoutput::
+
+   1
+
+Getting the Schema of a Dataset
+===============================
+
+Inspect Schema
+**************
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import org.apache.arrow.vector.types.pojo.Schema;
+
+   String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data3.parquet";
+   try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+       String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data3.parquet";
+       try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+           Schema schema = datasetFactory.inspect();
+
+           System.out.println(schema);
+       }
+   }
+
+.. testoutput::
+
+   Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+Infer Schema
+************
+
+.. testcode::
+
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import org.apache.arrow.vector.types.pojo.Schema;
+
+   String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data3.parquet";
+   try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE)){
+       try(DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri)){
+           ScanOptions options = new ScanOptions(1);
+           try(Dataset dataset = datasetFactory.finish()){
+               try(Scanner scanner = dataset.newScan(options)){
+                   Schema schema = scanner.schema();
+
+                   System.out.println(schema);
+               }
+           }
+       }
+   }
+
+.. testoutput::
+
+   Schema<id: Int(32, true), name: Utf8>(metadata: {parquet.avro.schema={"type":"record","name":"User","namespace":"org.apache.arrow.dataset","fields":[{"name":"id","type":["int","null"]},{"name":"name","type":["string","null"]}]}, writer.model.name=avro})
+
+Query Parquet File
+==================
+
+Let query information for a parquet file.
+
+Query Data Content For File
+***************************
+
+.. testcode::
+
+   import com.google.common.collect.Streams;
+   import org.apache.arrow.dataset.file.FileFormat;
+   import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+   import org.apache.arrow.dataset.jni.NativeMemoryPool;
+   import org.apache.arrow.dataset.scanner.ScanOptions;
+   import org.apache.arrow.dataset.scanner.Scanner;
+   import org.apache.arrow.dataset.source.Dataset;
+   import org.apache.arrow.dataset.source.DatasetFactory;
+   import org.apache.arrow.memory.RootAllocator;
+   import org.apache.arrow.util.AutoCloseables;
+   import org.apache.arrow.vector.FieldVector;
+   import org.apache.arrow.vector.VectorLoader;
+   import org.apache.arrow.vector.VectorSchemaRoot;
+   import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;
+   import org.apache.arrow.vector.types.pojo.Schema;
+
+   import java.util.List;
+   import java.util.stream.Collectors;
+   import java.util.stream.StreamSupport;
+
+   String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
+   try(RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE);
+       DatasetFactory datasetFactory = new FileSystemDatasetFactory(rootAllocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri);
+       Dataset dataset = datasetFactory.finish()){
+       ScanOptions options = new ScanOptions(100);
+       try(Scanner scanner = dataset.newScan(options)){
+           Schema schema = scanner.schema();
+           List<ArrowRecordBatch> batches = StreamSupport.stream(scanner.scan().spliterator(), false).flatMap(t -> Streams.stream(t.execute())).collect(Collectors.toList());

Review comment:
   Unit tests aren't necessarily examples of good code, and collecting all data up front means we have to have enough memory for all the data.
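A streaming shape for that recipe, sketched under the same API the hunk imports; it assumes `ScanTask.execute()` returns a closeable `ScanTask.BatchIterator` in this version of Arrow Java, and is an illustration rather than the code the PR settled on. Each `ArrowRecordBatch` is loaded into one reusable `VectorSchemaRoot` and released before the next is fetched, so peak memory is bounded by a single batch instead of the whole file:

```java
import org.apache.arrow.dataset.file.FileFormat;
import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
import org.apache.arrow.dataset.jni.NativeMemoryPool;
import org.apache.arrow.dataset.scanner.ScanOptions;
import org.apache.arrow.dataset.scanner.ScanTask;
import org.apache.arrow.dataset.scanner.Scanner;
import org.apache.arrow.dataset.source.Dataset;
import org.apache.arrow.dataset.source.DatasetFactory;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.VectorLoader;
import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.ipc.message.ArrowRecordBatch;

String uri = "file:" + System.getProperty("user.dir") + "/thirdpartydeps/parquetfiles/data1.parquet";
try (RootAllocator allocator = new RootAllocator(Long.MAX_VALUE);
     DatasetFactory factory = new FileSystemDatasetFactory(
         allocator, NativeMemoryPool.getDefault(), FileFormat.PARQUET, uri);
     Dataset dataset = factory.finish()) {
    ScanOptions options = new ScanOptions(100);
    try (Scanner scanner = dataset.newScan(options);
         // One root is reused for every batch; VectorLoader overwrites it in place.
         VectorSchemaRoot root = VectorSchemaRoot.create(scanner.schema(), allocator)) {
        VectorLoader loader = new VectorLoader(root);
        long totalRows = 0;
        for (ScanTask task : scanner.scan()) {
            try (ScanTask.BatchIterator batches = task.execute()) {
                while (batches.hasNext()) {
                    // Close each batch as soon as it is consumed so only one
                    // batch's buffers are held at any time.
                    try (ArrowRecordBatch batch = batches.next()) {
                        loader.load(batch);
                        totalRows += root.getRowCount();
                    }
                }
            }
        }
        System.out.println(totalRows);
    }
}
```

Aggregates such as the counts printed by the earlier recipes can then be kept as a running total instead of materializing a `List<ArrowRecordBatch>` up front.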
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]