Repository: spark
Updated Branches:
  refs/heads/branch-1.3 0ce148533 -> b90e5cba2


[SPARK-11424] Guard against double-close() of RecordReaders (branch-1.3 backport)

This is a branch-1.3 backport of #9382, a fix for SPARK-11424.

Author: Josh Rosen <joshro...@databricks.com>

Closes #9423 from JoshRosen/hadoop-decompressor-pooling-fix-branch-1.3.
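
For context, a minimal Scala sketch of the defensive pattern this backport applies in
HadoopRDD and NewHadoopRDD: close the RecordReader at most once, null out the reference so
any repeated close() becomes a no-op, and only log (rather than rethrow) close() failures.
This is illustrative only; java.io.Closeable stands in for the Hadoop RecordReader, and the
println replaces the logWarning()/Utils.inShutdown() handling used by the actual patch.

    // Minimal sketch of the guard; java.io.Closeable stands in for the Hadoop RecordReader.
    object GuardedCloseSketch {
      var reader: java.io.Closeable = null  // assigned when the iterator is created

      def closeReaderOnce(): Unit = {
        if (reader != null) {
          try {
            reader.close()  // closing twice can trigger MAPREDUCE-5918 on Hadoop 1.x / older 2.x
          } catch {
            case e: Exception =>
              // The patch logs via logWarning() and suppresses the warning during JVM shutdown.
              println(s"Exception in RecordReader.close(): $e")
          } finally {
            reader = null   // any later call becomes a no-op
          }
        }
      }
    }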


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/b90e5cba
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/b90e5cba
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/b90e5cba

Branch: refs/heads/branch-1.3
Commit: b90e5cba2215869fe858179bf38454c90a64e967
Parents: 0ce1485
Author: Josh Rosen <joshro...@databricks.com>
Authored: Tue Nov 3 14:17:51 2015 -0800
Committer: Josh Rosen <joshro...@databricks.com>
Committed: Tue Nov 3 14:17:51 2015 -0800

----------------------------------------------------------------------
 .../scala/org/apache/spark/rdd/HadoopRDD.scala  | 23 +++++++++++-------
 .../org/apache/spark/rdd/NewHadoopRDD.scala     | 25 +++++++++++++-------
 .../org/apache/spark/util/NextIterator.scala    |  4 +++-
 3 files changed, 34 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/b90e5cba/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 486e86c..e127621 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -254,8 +254,21 @@ class HadoopRDD[K, V](
       }
 
       override def close() {
-        try {
-          reader.close()
+        if (reader != null) {
+          // Close the reader and release it. Note: it's very important that we don't close the
+          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
+          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
+          // corruption issues when reading compressed input.
+          try {
+            reader.close()
+          } catch {
+            case e: Exception =>
+              if (!Utils.inShutdown()) {
+                logWarning("Exception in RecordReader.close()", e)
+              }
+          } finally {
+            reader = null
+          }
           if (bytesReadCallback.isDefined) {
             inputMetrics.updateBytesRead()
           } else if (split.inputSplit.value.isInstanceOf[FileSplit] ||
@@ -269,12 +282,6 @@ class HadoopRDD[K, V](
                 logWarning("Unable to get input size to set InputMetrics for 
task", e)
             }
           }
-        } catch {
-          case e: Exception => {
-            if (!Utils.inShutdown()) {
-              logWarning("Exception in RecordReader.close()", e)
-            }
-          }
         }
       }
     }
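
To illustrate the effect of nulling the reference in the finally block above, a small
standalone example (hypothetical, not part of the patch): the second call finds reader == null
and never reaches the underlying close().

    object DoubleCloseDemo extends App {
      var reader: java.io.Closeable = new java.io.Closeable {
        override def close(): Unit = println("underlying close() ran")
      }

      def close(): Unit = {
        if (reader != null) {
          try reader.close()
          finally reader = null  // later calls see null and return immediately
        }
      }

      close()  // prints "underlying close() ran"
      close()  // no output: guarded by the null check
    }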

http://git-wip-us.apache.org/repos/asf/spark/blob/b90e5cba/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 7fb9484..f863778 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -128,7 +128,7 @@ class NewHadoopRDD[K, V](
           configurable.setConf(conf)
         case _ =>
       }
-      val reader = format.createRecordReader(
+      var reader = format.createRecordReader(
         split.serializableHadoopSplit.value, hadoopAttemptContext)
       reader.initialize(split.serializableHadoopSplit.value, hadoopAttemptContext)
 
@@ -158,8 +158,21 @@ class NewHadoopRDD[K, V](
       }
 
       private def close() {
-        try {
-          reader.close()
+        if (reader != null) {
+          // Close the reader and release it. Note: it's very important that we don't close the
+          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
+          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
+          // corruption issues when reading compressed input.
+          try {
+            reader.close()
+          } catch {
+            case e: Exception =>
+              if (!Utils.inShutdown()) {
+                logWarning("Exception in RecordReader.close()", e)
+              }
+          } finally {
+            reader = null
+          }
           if (bytesReadCallback.isDefined) {
             inputMetrics.updateBytesRead()
           } else if (split.serializableHadoopSplit.value.isInstanceOf[FileSplit] ||
@@ -173,12 +186,6 @@ class NewHadoopRDD[K, V](
                 logWarning("Unable to get input size to set InputMetrics for 
task", e)
             }
           }
-        } catch {
-          case e: Exception => {
-            if (!Utils.inShutdown()) {
-              logWarning("Exception in RecordReader.close()", e)
-            }
-          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/b90e5cba/core/src/main/scala/org/apache/spark/util/NextIterator.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/NextIterator.scala b/core/src/main/scala/org/apache/spark/util/NextIterator.scala
index e5c732a..0b505a5 100644
--- a/core/src/main/scala/org/apache/spark/util/NextIterator.scala
+++ b/core/src/main/scala/org/apache/spark/util/NextIterator.scala
@@ -60,8 +60,10 @@ private[spark] abstract class NextIterator[U] extends Iterator[U] {
    */
   def closeIfNeeded() {
     if (!closed) {
-      close()
+      // Note: it's important that we set closed = true before calling close(), since setting it
+      // afterwards would permit us to call close() multiple times if close() threw an exception.
       closed = true
+      close()
     }
   }
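
Why the ordering in closeIfNeeded() matters: if close() throws before closed is set, a later
call to closeIfNeeded() would invoke close() again and double-close the reader. A standalone
sketch of the fixed ordering (hypothetical class, not Spark's NextIterator):

    abstract class CloseOnce {
      private var closed = false
      protected def close(): Unit

      def closeIfNeeded(): Unit = {
        if (!closed) {
          closed = true  // mark first, so a throwing close() cannot be retried later
          close()
        }
      }
    }

    object OrderingDemo extends App {
      val c = new CloseOnce {
        private var calls = 0
        protected def close(): Unit = { calls += 1; sys.error(s"close() failed on call $calls") }
      }
      try c.closeIfNeeded() catch { case _: RuntimeException => () }  // close() runs once, throws
      c.closeIfNeeded()  // no second call: closed was already set to true
    }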
 

