This is an automated email from the ASF dual-hosted git repository.

qiangcai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 1c8e9b6  [CARBONDATA-3317] Fix NPE when execute 'show segments' command for stream table
1c8e9b6 is described below

commit 1c8e9b680bac128e48e860acb4b4d59b0410231a
Author: Zhang Zhichao <441586...@qq.com>
AuthorDate: Sat Mar 16 23:49:24 2019 +0800

    [CARBONDATA-3317] Fix NPE when execute 'show segments' command for stream table
    
    When a Spark streaming app starts to create a new stream segment, it does
    not write the segment's index file until the data has been written
    successfully; if the 'show segments' command is executed during that
    window, it throws an NPE (a hedged reproduction and a sketch of the fix
    are shown below).
    
    This closes #3149
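
    For context, a hedged reproduction (the table name 'stream_table' and the
    SparkSession value 'spark' are hypothetical; the NPE only occurs while a
    stream segment is mid-write, before its index file exists):

        // Before this fix: throws NullPointerException for an in-flight
        // stream segment; after it, the segment's sizes are reported as -1.
        spark.sql("SHOW SEGMENTS FOR TABLE stream_table").show()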
---
 .../src/main/scala/org/apache/carbondata/api/CarbonStore.scala | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 11db430..f5e429e 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -103,8 +103,14 @@ object CarbonStore {
             // since it is continuously inserting data
            val segmentDir = CarbonTablePath.getSegmentPath(tablePath, load.getLoadName)
            val indexPath = CarbonTablePath.getCarbonStreamIndexFilePath(segmentDir)
-            val indices = StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
-            (indices.asScala.map(_.getFile_size).sum, FileFactory.getCarbonFile(indexPath).getSize)
+            val indexFile = FileFactory.getCarbonFile(indexPath)
+            if (indexFile.exists()) {
+              val indices =
+                StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
+              (indices.asScala.map(_.getFile_size).sum, indexFile.getSize)
+            } else {
+              (-1L, -1L)
+            }
          } else {
            // for batch segment, we can get the data size from table status file directly
            (if (load.getDataSize == null) -1L else load.getDataSize.toLong,
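
For reference, a self-contained sketch of the guard this patch introduces: probe
for the stream segment's index file and fall back to (-1L, -1L), the same "size
unknown" convention the batch branch already uses, instead of reading an index
that is not there yet. The import paths are assumptions inferred from the
identifiers in the diff, and the helper name streamSegmentSizes is hypothetical:

    import scala.collection.JavaConverters._

    import org.apache.carbondata.core.datastore.impl.FileFactory
    import org.apache.carbondata.core.util.path.CarbonTablePath
    import org.apache.carbondata.streaming.segment.StreamSegment

    // Returns (dataSize, indexSize) for a stream segment, or (-1L, -1L) when
    // the index file has not been written yet (segment is mid-write).
    def streamSegmentSizes(tablePath: String, loadName: String): (Long, Long) = {
      val segmentDir = CarbonTablePath.getSegmentPath(tablePath, loadName)
      val indexPath = CarbonTablePath.getCarbonStreamIndexFilePath(segmentDir)
      val indexFile = FileFactory.getCarbonFile(indexPath)
      if (indexFile.exists()) {
        // Index file present: sum the per-file sizes recorded in it.
        val indices =
          StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
        (indices.asScala.map(_.getFile_size).sum, indexFile.getSize)
      } else {
        // Segment still being written: report sizes as unknown rather than
        // dereference a missing index (the NPE this patch fixes).
        (-1L, -1L)
      }
    }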
