This is an automated email from the ASF dual-hosted git repository.

yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new c1e5f53cbe0f [SPARK-50791][SQL] Fix NPE in State Store error handling
c1e5f53cbe0f is described below

commit c1e5f53cbe0fd8b1358d909e5126530abf3ce004
Author: Livia Zhu <[email protected]>
AuthorDate: Tue Jan 14 08:26:17 2025 +0800

    [SPARK-50791][SQL] Fix NPE in State Store error handling
    
    ### What changes were proposed in this pull request?
    
    Directly calling `contains` on the result of `SparkException.getCondition` may
    result in a NullPointerException if `getCondition` returns `null`. This change
    wraps `getCondition` in an `Option` for null-safe handling.
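    
    A minimal standalone sketch of the null-safe pattern described above (illustrative
    only, not part of the patch):
    
        // Hypothetical nullable condition, e.g. from a SparkException wrapping an
        // OOM that has no error condition set
        val condition: String = null
        // condition.contains("CANNOT_LOAD_STATE_STORE")  // would throw NullPointerException
        Option(condition).exists(_.contains("CANNOT_LOAD_STATE_STORE"))  // safely returns false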
    
    ### Why are the changes needed?
    
    Throwing an NPE is a bug.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes. Previously, for a SparkException such as an OOM that does not have a set
    condition, users would see a NullPointerException. Now they will correctly see
    a CANNOT_LOAD_STATE_STORE exception.
    
    ### How was this patch tested?
    
    Existing tests.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No
    
    Closes #49451 from liviazhu-db/liviazhu-db/statestore-npe.
    
    Authored-by: Livia Zhu <[email protected]>
    Signed-off-by: yangjie01 <[email protected]>
---
 .../execution/streaming/state/HDFSBackedStateStoreProvider.scala    | 3 ++-
 .../sql/execution/streaming/state/RocksDBStateStoreProvider.scala   | 6 ++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
index ae06e82335b1..2deccb845fea 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
@@ -291,7 +291,8 @@ private[sql] class HDFSBackedStateStoreProvider extends StateStoreProvider with
       newMap
     }
     catch {
-      case e: SparkException if e.getCondition.contains("CANNOT_LOAD_STATE_STORE") =>
+      case e: SparkException
+        if Option(e.getCondition).exists(_.contains("CANNOT_LOAD_STATE_STORE")) =>
         throw e
       case e: OutOfMemoryError =>
         throw QueryExecutionErrors.notEnoughMemoryToLoadStore(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreProvider.scala
index 60652367f335..9fc48a60d7c6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreProvider.scala
@@ -439,7 +439,8 @@ private[sql] class RocksDBStateStoreProvider
       new RocksDBStateStore(version)
     }
     catch {
-      case e: SparkException if e.getCondition.contains("CANNOT_LOAD_STATE_STORE") =>
+      case e: SparkException
+        if Option(e.getCondition).exists(_.contains("CANNOT_LOAD_STATE_STORE")) =>
         throw e
       case e: OutOfMemoryError =>
         throw QueryExecutionErrors.notEnoughMemoryToLoadStore(
@@ -462,7 +463,8 @@ private[sql] class RocksDBStateStoreProvider
       new RocksDBStateStore(version)
     }
     catch {
-      case e: SparkException if e.getCondition.contains("CANNOT_LOAD_STATE_STORE") =>
+      case e: SparkException
+        if Option(e.getCondition).exists(_.contains("CANNOT_LOAD_STATE_STORE")) =>
         throw e
       case e: OutOfMemoryError =>
         throw QueryExecutionErrors.notEnoughMemoryToLoadStore(


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
