This is an automated email from the ASF dual-hosted git repository.

cgivre pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git


The following commit(s) were added to refs/heads/master by this push:
     new 9ad10f5  DRILL-7875: Drill Fails to Query Splunk Indexes with no Timestamp
9ad10f5 is described below

commit 9ad10f5949c3d2171106fae418052e3077c65200
Author: Charles Givre <[email protected]>
AuthorDate: Sat Mar 6 22:18:57 2021 -0500

    DRILL-7875: Drill Fails to Query Splunk Indexes with no Timestamp
---
 .../org/apache/drill/exec/store/esri/ShpBatchReader.java |  6 +++---
 .../drill/exec/store/splunk/SplunkBatchReader.java       | 16 ++++++++++++----
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpBatchReader.java b/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpBatchReader.java
index d499a3f..7a129a0 100644
--- a/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpBatchReader.java
+++ b/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpBatchReader.java
@@ -125,7 +125,7 @@ public class ShpBatchReader implements ManagedReader<FileSchemaNegotiator> {
 
   private void openFile(FileSchemaNegotiator negotiator) {
     try {
-      fileReaderShp = negotiator.fileSystem().open(split.getPath());
+      fileReaderShp = negotiator.fileSystem().openPossiblyCompressedStream(split.getPath());
       byte[] shpBuf = new byte[fileReaderShp.available()];
       fileReaderShp.read(shpBuf);
 
@@ -135,10 +135,10 @@ public class ShpBatchReader implements ManagedReader<FileSchemaNegotiator> {
       ShapefileReader shpReader = new ShapefileReader();
       geomCursor = shpReader.getGeometryCursor(byteBuffer);
 
-      fileReaderDbf = negotiator.fileSystem().open(hadoopDbf);
+      fileReaderDbf = negotiator.fileSystem().openPossiblyCompressedStream(hadoopDbf);
       dbfReader = new DbfReader(fileReaderDbf);
 
-      fileReaderPrj = negotiator.fileSystem().open(hadoopPrj);
+      fileReaderPrj = negotiator.fileSystem().openPossiblyCompressedStream(hadoopPrj);
       byte[] prjBuf = new byte[fileReaderPrj.available()];
       fileReaderPrj.read(prjBuf);
       fileReaderPrj.close();
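
For context, a minimal sketch (not part of the patch) of the pattern the ShpBatchReader change adopts, assuming Drill's DrillFileSystem (what negotiator.fileSystem() returns); the readFully() helper is hypothetical. openPossiblyCompressedStream() picks a decompression codec from the file extension, so a reader written this way also handles e.g. gzipped inputs that plain open() would hand back as raw compressed bytes:

// Sketch only, not part of the patch. Assumes Drill's DrillFileSystem;
// readFully() is a hypothetical helper for illustration.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.drill.exec.store.dfs.DrillFileSystem;
import org.apache.hadoop.fs.Path;

public class CompressedReadSketch {
  // Open a file that may be compressed (e.g. points.shp.gz) and read it
  // fully; the codec wrapping is transparent to the caller.
  static byte[] readFully(DrillFileSystem fs, Path path) throws IOException {
    try (InputStream in = fs.openPossiblyCompressedStream(path)) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);
      }
      return out.toByteArray();
    }
  }
}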
diff --git a/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkBatchReader.java b/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkBatchReader.java
index 50c1575..b74338c 100644
--- a/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkBatchReader.java
+++ b/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkBatchReader.java
@@ -369,8 +369,12 @@ public class SplunkBatchReader implements ManagedReader<SchemaNegotiator> {
 
     @Override
     public void load(String[] record) {
-      int value = Integer.parseInt(record[columnIndex]);
-      columnWriter.setInt(value);
+      if (record[columnIndex] != null) {
+        int value = Integer.parseInt(record[columnIndex]);
+        columnWriter.setInt(value);
+      } else {
+        columnWriter.setNull();
+      }
     }
   }
 
@@ -385,8 +389,12 @@ public class SplunkBatchReader implements ManagedReader<SchemaNegotiator> {
 
     @Override
     public void load(String[] record) {
-      long value = Long.parseLong(record[columnIndex]) * 1000;
-      columnWriter.setTimestamp(Instant.ofEpochMilli(value));
+      if (record[columnIndex] != null) {
+        long value = Long.parseLong(record[columnIndex]) * 1000;
+        columnWriter.setTimestamp(Instant.ofEpochMilli(value));
+      } else {
+        columnWriter.setNull();
+      }
     }
   }
 }
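
Both Splunk hunks apply the same guard: an event indexed without a timestamp comes back with a null field value, and Integer.parseInt(null) / Long.parseLong(null) throw NumberFormatException, so the loaders now write SQL NULL instead. A minimal self-contained sketch of the timestamp variant, assuming Drill's ScalarWriter and java.time.Instant as used in the reader; TimestampLoaderSketch is a hypothetical stand-in for the private loader class in SplunkBatchReader:

// Sketch only, not part of the patch: a simplified stand-in for the private
// timestamp loader in SplunkBatchReader, showing the null-guard pattern.
import java.time.Instant;
import org.apache.drill.exec.vector.accessor.ScalarWriter;

class TimestampLoaderSketch {
  private final int columnIndex;          // position of the time field in the record
  private final ScalarWriter columnWriter;

  TimestampLoaderSketch(int columnIndex, ScalarWriter columnWriter) {
    this.columnIndex = columnIndex;
    this.columnWriter = columnWriter;
  }

  public void load(String[] record) {
    String raw = record[columnIndex];
    if (raw != null) {
      // Splunk reports event time in epoch seconds; Drill's timestamp
      // writer takes an Instant, so scale to milliseconds first.
      columnWriter.setTimestamp(Instant.ofEpochMilli(Long.parseLong(raw) * 1000));
    } else {
      // No timestamp on the indexed event: write SQL NULL rather than
      // letting Long.parseLong(null) throw a NumberFormatException.
      columnWriter.setNull();
    }
  }
}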
