lidavidm commented on code in PR #316:
URL: https://github.com/apache/arrow-cookbook/pull/316#discussion_r1327787518


##########
java/source/jdbc.rst:
##########
@@ -307,3 +307,191 @@ values to the given scale.
    102    true    100000000030.0000000    some char text      [1,2]
    INT_FIELD1    BOOL_FIELD2    BIGINT_FIELD5    CHAR_FIELD16    LIST_FIELD19
    103    true    10000000003.0000000    some char text      [1]
+
+Write ResultSet to Parquet File
+===============================
+
+As an example, we are trying to write a parquet file from the JDBC adapter results.
+
+.. testcode::
+
+    import java.io.BufferedReader;
+    import java.io.FileReader;
+    import java.io.IOException;
+    import java.nio.file.DirectoryStream;
+    import java.nio.file.Files;
+    import java.nio.file.Path;
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Types;
+    import java.util.HashMap;
+
+    import org.apache.arrow.adapter.jdbc.ArrowVectorIterator;
+    import org.apache.arrow.adapter.jdbc.JdbcFieldInfo;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrow;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfig;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfigBuilder;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowUtils;
+    import org.apache.arrow.dataset.file.DatasetFileWriter;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.BufferAllocator;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.ArrowReader;
+    import org.apache.arrow.vector.types.pojo.Schema;
+    import org.apache.ibatis.jdbc.ScriptRunner;
+    import org.slf4j.LoggerFactory;
+
+    import ch.qos.logback.classic.Level;
+    import ch.qos.logback.classic.Logger;
+
+    class JDBCReader extends ArrowReader {
+      private final ArrowVectorIterator iter;
+      private final JdbcToArrowConfig config;
+      private VectorSchemaRoot root;
+      private boolean firstRoot = true;
+
+      public JDBCReader(BufferAllocator allocator, ArrowVectorIterator iter, JdbcToArrowConfig config) {
+        super(allocator);
+        this.iter = iter;
+        this.config = config;
+      }
+
+      @Override
+      public boolean loadNextBatch() throws IOException {
+        if (firstRoot) {
+          firstRoot = false;
+          return true;
+        }
+        else {
+          if (iter.hasNext()) {
+            if (root != null && !config.isReuseVectorSchemaRoot()) {
+              root.close();
+            }
+            else {
+              root.allocateNew();
+            }
+            root = iter.next();
+            return root.getRowCount() != 0;
+          }
+          else {
+            return false;
+          }
+        }
+      }
+
+      @Override
+      public long bytesRead() {
+        return 0;
+      }
+
+      @Override
+      protected void closeReadSource() throws IOException {
+        if (root != null && !config.isReuseVectorSchemaRoot()) {
+          root.close();
+        }
+      }
+
+      @Override
+      protected Schema readSchema() throws IOException {
+        return null;
+      }
+
+      @Override
+      public VectorSchemaRoot getVectorSchemaRoot() throws IOException {
+        if (root == null) {
+          root = iter.next();
+        }
+        return root;
+      }
+    }
+
+    ((Logger) LoggerFactory.getLogger("org.apache.arrow")).setLevel(Level.TRACE);
+    try (
+        final BufferAllocator allocator = new RootAllocator();
+        final BufferAllocator allocatorJDBC = allocator.newChildAllocator("allocatorJDBC", 0, Long.MAX_VALUE);
+        final BufferAllocator allocatorReader = allocator.newChildAllocator("allocatorReader", 0, Long.MAX_VALUE);
+        final BufferAllocator allocatorParquetWrite = allocator.newChildAllocator("allocatorParquetWrite", 0,
+            Long.MAX_VALUE);
+        final Connection connection = DriverManager.getConnection(
+            "jdbc:h2:mem:h2-jdbc-adapter")
+    ) {
+      ScriptRunner runnerDDLDML = new ScriptRunner(connection);
+      runnerDDLDML.setLogWriter(null);
+      runnerDDLDML.runScript(new BufferedReader(
+          new FileReader("./thirdpartydeps/jdbc/h2-ddl.sql")));
+      runnerDDLDML.runScript(new BufferedReader(
+          new FileReader("./thirdpartydeps/jdbc/h2-dml.sql")));
+      JdbcToArrowConfig config = new JdbcToArrowConfigBuilder(allocatorJDBC,
+          JdbcToArrowUtils.getUtcCalendar())
+          .setTargetBatchSize(2)
+          .setReuseVectorSchemaRoot(true)
+          .setArraySubTypeByColumnNameMap(

Review Comment:
   In the interest of keeping examples concise, let's use sample data that doesn't require us to deal with all of this in the first place.
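
   A minimal sketch of that suggestion, assuming the connection and allocatorJDBC objects from the example are already in scope; the SIMPLE_TABLE name and columns are illustrative only, and java.sql.Statement would need to be imported as well:

       // Hypothetical simpler sample data: no array/list columns, so the
       // JdbcToArrowConfig below needs no setArraySubTypeByColumnNameMap(...).
       try (Statement stmt = connection.createStatement()) {
         stmt.execute("CREATE TABLE SIMPLE_TABLE (ID INT, NAME VARCHAR(64))");
         stmt.execute("INSERT INTO SIMPLE_TABLE VALUES (1, 'one'), (2, 'two')");
       }
       JdbcToArrowConfig config = new JdbcToArrowConfigBuilder(allocatorJDBC,
           JdbcToArrowUtils.getUtcCalendar())
           .setTargetBatchSize(2)
           .setReuseVectorSchemaRoot(true)
           .build();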



##########
java/source/jdbc.rst:
##########
@@ -307,3 +307,191 @@ values to the given scale.
    102    true    100000000030.0000000    some char text      [1,2]
    INT_FIELD1    BOOL_FIELD2    BIGINT_FIELD5    CHAR_FIELD16    LIST_FIELD19
    103    true    10000000003.0000000    some char text      [1]
+
+Write ResultSet to Parquet File
+===============================
+
+As an example, we are trying to write a parquet file from the JDBC adapter results.
+
+.. testcode::
+
+    import java.io.BufferedReader;
+    import java.io.FileReader;
+    import java.io.IOException;
+    import java.nio.file.DirectoryStream;
+    import java.nio.file.Files;
+    import java.nio.file.Path;
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Types;
+    import java.util.HashMap;
+
+    import org.apache.arrow.adapter.jdbc.ArrowVectorIterator;
+    import org.apache.arrow.adapter.jdbc.JdbcFieldInfo;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrow;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfig;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfigBuilder;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowUtils;
+    import org.apache.arrow.dataset.file.DatasetFileWriter;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.BufferAllocator;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.ArrowReader;
+    import org.apache.arrow.vector.types.pojo.Schema;
+    import org.apache.ibatis.jdbc.ScriptRunner;
+    import org.slf4j.LoggerFactory;
+
+    import ch.qos.logback.classic.Level;
+    import ch.qos.logback.classic.Logger;
+
+    class JDBCReader extends ArrowReader {

Review Comment:
   Explain that we need this because writing a dataset takes an ArrowReader, so we have to adapt the JDBC ArrowVectorIterator to the ArrowReader interface.
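
   For context, a rough sketch of that adaptation, assuming a resultSet obtained from the connection (not shown in the quoted hunk) plus the allocators and config from the example; the output URI is illustrative:

       // DatasetFileWriter.write(...) consumes an ArrowReader, so the JDBC
       // ArrowVectorIterator is wrapped in the JDBCReader adapter first.
       // Checked exceptions (SQLException/IOException) are elided for brevity.
       try (ArrowVectorIterator iterator =
               JdbcToArrow.sqlToArrowVectorIterator(resultSet, config);
            ArrowReader reader = new JDBCReader(allocatorReader, iterator, config)) {
         DatasetFileWriter.write(allocatorParquetWrite, reader, FileFormat.PARQUET,
             "file:///tmp/jdbc-to-parquet");
       }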



##########
java/source/jdbc.rst:
##########
@@ -307,3 +307,191 @@ values to the given scale.
    102    true    100000000030.0000000    some char text      [1,2]
    INT_FIELD1    BOOL_FIELD2    BIGINT_FIELD5    CHAR_FIELD16    LIST_FIELD19
    103    true    10000000003.0000000    some char text      [1]
+
+Write ResultSet to Parquet File
+===============================
+
+As an example, we are trying to write a parquet file from the JDBC adapter results.
+
+.. testcode::
+
+    import java.io.BufferedReader;
+    import java.io.FileReader;
+    import java.io.IOException;
+    import java.nio.file.DirectoryStream;
+    import java.nio.file.Files;
+    import java.nio.file.Path;
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Types;
+    import java.util.HashMap;
+
+    import org.apache.arrow.adapter.jdbc.ArrowVectorIterator;
+    import org.apache.arrow.adapter.jdbc.JdbcFieldInfo;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrow;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfig;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowConfigBuilder;
+    import org.apache.arrow.adapter.jdbc.JdbcToArrowUtils;
+    import org.apache.arrow.dataset.file.DatasetFileWriter;
+    import org.apache.arrow.dataset.file.FileFormat;
+    import org.apache.arrow.dataset.file.FileSystemDatasetFactory;
+    import org.apache.arrow.dataset.jni.NativeMemoryPool;
+    import org.apache.arrow.dataset.scanner.ScanOptions;
+    import org.apache.arrow.dataset.scanner.Scanner;
+    import org.apache.arrow.dataset.source.Dataset;
+    import org.apache.arrow.dataset.source.DatasetFactory;
+    import org.apache.arrow.memory.BufferAllocator;
+    import org.apache.arrow.memory.RootAllocator;
+    import org.apache.arrow.vector.VectorSchemaRoot;
+    import org.apache.arrow.vector.ipc.ArrowReader;
+    import org.apache.arrow.vector.types.pojo.Schema;
+    import org.apache.ibatis.jdbc.ScriptRunner;
+    import org.slf4j.LoggerFactory;
+
+    import ch.qos.logback.classic.Level;
+    import ch.qos.logback.classic.Logger;
+
+    class JDBCReader extends ArrowReader {
+      private final ArrowVectorIterator iter;
+      private final JdbcToArrowConfig config;
+      private VectorSchemaRoot root;
+      private boolean firstRoot = true;
+
+      public JDBCReader(BufferAllocator allocator, ArrowVectorIterator iter, JdbcToArrowConfig config) {
+        super(allocator);
+        this.iter = iter;
+        this.config = config;
+      }
+
+      @Override
+      public boolean loadNextBatch() throws IOException {
+        if (firstRoot) {
+          firstRoot = false;
+          return true;
+        }
+        else {
+          if (iter.hasNext()) {
+            if (root != null && !config.isReuseVectorSchemaRoot()) {
+              root.close();
+            }
+            else {
+              root.allocateNew();
+            }
+            root = iter.next();
+            return root.getRowCount() != 0;
+          }
+          else {
+            return false;
+          }
+        }
+      }
+
+      @Override
+      public long bytesRead() {
+        return 0;
+      }
+
+      @Override
+      protected void closeReadSource() throws IOException {
+        if (root != null && !config.isReuseVectorSchemaRoot()) {
+          root.close();
+        }
+      }
+
+      @Override
+      protected Schema readSchema() throws IOException {
+        return null;
+      }
+
+      @Override
+      public VectorSchemaRoot getVectorSchemaRoot() throws IOException {
+        if (root == null) {
+          root = iter.next();
+        }
+        return root;
+      }
+    }
+
+    ((Logger) LoggerFactory.getLogger("org.apache.arrow")).setLevel(Level.TRACE);

Review Comment:
   Why are we fiddling with loggers and adding logback to the example? I don't think we need any of that?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
