This is an automated email from the ASF dual-hosted git repository.

boaz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 1355bfddb1c76462366e28ff7f98fdb6823b4b2a
Author: Gautam Parai <[email protected]>
AuthorDate: Thu May 24 19:00:21 2018 -0700

    DRILL-3964: Fix NPE in WriterRecordBatch when the input has 0 rows
    
    closes #1290
---
 .../org/apache/drill/exec/physical/impl/WriterRecordBatch.java |  8 +++++++-
 .../apache/drill/exec/store/parquet/ParquetRecordWriter.java   |  3 ++-
 .../src/test/java/org/apache/drill/exec/sql/TestCTAS.java      | 10 ++++++++++
 3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WriterRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WriterRecordBatch.java
index b58cb55..65d0c54 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WriterRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WriterRecordBatch.java
@@ -95,8 +95,14 @@ public class WriterRecordBatch extends AbstractRecordBatch<Writer> {
             return upstream;
 
           case NOT_YET:
-          case NONE:
             break;
+          case NONE:
+            if (schema != null) {
+              // Schema here is the output batch schema, which is set up in setupNewSchema(). Since the
+              // output schema is fixed (Fragment (VARCHAR), Number of records written (BIGINT)), we should
+              // set it up even with 0 records so that it is reported back to the client.
+              break;
+            }
 
           case OK_NEW_SCHEMA:
             setupNewSchema();
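
For illustration, here is a minimal, self-contained Java sketch (a hypothetical
class, not the actual Drill code) of the fall-through pattern the fix relies on:
when NONE arrives and no schema has been set up yet, control falls through into
the OK_NEW_SCHEMA branch, so the fixed summary schema still gets created for a
zero-row input.

    public class FallThroughSketch {
      enum Outcome { NOT_YET, NONE, OK_NEW_SCHEMA }

      static String schema = null;  // stands in for WriterRecordBatch.schema

      static void handle(Outcome upstream) {
        switch (upstream) {
          case NOT_YET:
            break;
          case NONE:
            if (schema != null) {
              break;  // schema already set up; nothing left to report
            }
            // deliberate fall-through: 0 rows seen, but the summary schema must still exist
          case OK_NEW_SCHEMA:
            schema = "(Fragment VARCHAR, Records written BIGINT)";
            break;
        }
      }

      public static void main(String[] args) {
        handle(Outcome.NONE);        // zero-row input: schema is still set up
        System.out.println(schema);
      }
    }
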
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
index 114bae4..0e40c9e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
@@ -230,7 +230,8 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
     // Its value is likely below Integer.MAX_VALUE (2GB), although rowGroupSize is a long type.
     // Therefore this size is cast to int, since the underlying layer that allocates the byte
     // array limits the array size to the int range.
-    int initialBlockBufferSize = max(MINIMUM_BUFFER_SIZE, blockSize / this.schema.getColumns().size() / 5);
+    int initialBlockBufferSize = this.schema.getColumns().size() > 0 ?
+        max(MINIMUM_BUFFER_SIZE, blockSize / this.schema.getColumns().size() / 5) : MINIMUM_BUFFER_SIZE;
     // We don't want this number to be too small either. Ideally, slightly bigger than the page size,
     // but not bigger than the block buffer.
     int initialPageBufferSize = max(MINIMUM_BUFFER_SIZE, min(pageSize + pageSize / 10, initialBlockBufferSize));
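
The guard matters because an empty schema makes the divisor zero: in Java, integer
division by zero throws ArithmeticException, so the old expression could not survive
a zero-column write. A small sketch of the guarded sizing (hypothetical constants,
not Drill's actual defaults):

    import static java.lang.Math.max;

    public class BufferSizingSketch {
      static final int MINIMUM_BUFFER_SIZE = 64 * 1024;  // assumed placeholder value

      static int initialBlockBufferSize(long blockSize, int numColumns) {
        // With zero columns, fall back to the minimum instead of dividing by zero.
        return numColumns > 0
            ? max(MINIMUM_BUFFER_SIZE, (int) (blockSize / numColumns / 5))
            : MINIMUM_BUFFER_SIZE;
      }

      public static void main(String[] args) {
        // e.g. a 512 MiB block over 10 columns yields ~10.7 MB per column buffer
        System.out.println(initialBlockBufferSize(512L * 1024 * 1024, 10));
        System.out.println(initialBlockBufferSize(512L * 1024 * 1024, 0));  // minimum fallback
      }
    }
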
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestCTAS.java b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestCTAS.java
index 2315a03..2e7c052 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestCTAS.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestCTAS.java
@@ -315,6 +315,16 @@ public class TestCTAS extends BaseTestQuery {
     }
   }
 
+  @Test
+  public void testCTASWithEmptyJson() throws Exception {
+    final String newTblName = "tbl4444";
+    try {
+      test(String.format("CREATE TABLE %s.%s AS SELECT * FROM cp.`project/pushdown/empty.json`", DFS_TMP_SCHEMA, newTblName));
+    } finally {
+      test("DROP TABLE IF EXISTS %s.%s", DFS_TMP_SCHEMA, newTblName);
+    }
+  }
+
   private static void ctasErrorTestHelper(final String ctasSql, final String expErrorMsg) throws Exception {
     final String createTableSql = String.format(ctasSql, "testTableName");
     errorMsgTestHelper(createTableSql, expErrorMsg);
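
The new test drives the zero-row path end to end: a CTAS over the zero-row
empty.json classpath resource previously hit the NPE this commit fixes. With a
standard Maven/Surefire setup, the single test should be runnable with something
like "mvn -pl exec/java-exec test -Dtest=TestCTAS#testCTASWithEmptyJson" (the
exact invocation depends on the build configuration).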

-- 
To stop receiving notification emails like this one, please contact
[email protected].
