Repository: spark
Updated Branches:
  refs/heads/branch-2.0 8467e2102 -> 6d82e0c1b


[SPARK-15562][ML] Delete temp directory after program exit in DataFrameExample

## What changes were proposed in this pull request?
The temp directory used to save records in DataFrameExample is not deleted 
after the program exits. Although the code calls deleteOnExit, that has no 
effect because the directory is not empty at JVM shutdown, and deleteOnExit 
cannot remove a non-empty directory. The same problem happened in 
ContextCleanerSuite. Update both to use Utils.createTempDir(), which makes 
sure the temp directory is deleted after program exit.
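
For context, here is a minimal sketch (illustrative only, not Spark's actual 
`Utils.createTempDir` implementation; the `createTempDir` and 
`deleteRecursively` helpers below are hypothetical stand-ins) of why 
`deleteOnExit` fails here and how a recursive-delete shutdown hook fixes it:

```scala
import java.io.File
import java.nio.file.Files

object TempDirExample {

  // Delete a directory tree bottom-up; File.delete() alone fails on
  // non-empty directories, which is why deleteOnExit was not enough.
  def deleteRecursively(file: File): Unit = {
    if (file.isDirectory) {
      Option(file.listFiles()).getOrElse(Array.empty[File]).foreach(deleteRecursively)
    }
    file.delete()
  }

  // Create a temp directory and arrange for it to be removed, contents
  // included, when the JVM exits.
  def createTempDir(prefix: String = "example"): File = {
    val dir = Files.createTempDirectory(prefix).toFile
    sys.addShutdownHook(deleteRecursively(dir))
    dir
  }

  def main(args: Array[String]): Unit = {
    val tmpDir = createTempDir()
    // Put a file inside: tmpDir.deleteOnExit() alone would now leave the
    // directory behind, since it is no longer empty at shutdown.
    Files.write(new File(tmpDir, "records.txt").toPath, "data".getBytes("UTF-8"))
    println(s"$tmpDir will be deleted by the shutdown hook")
  }
}
```

Spark's `Utils.createTempDir` takes the same approach, registering the 
directory for recursive deletion at JVM shutdown via `ShutdownHookManager`.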

## How was this patch tested?

Unit tests and a local build.

Author: dding3 <ding.d...@intel.com>

Closes #13328 from dding3/master.

(cherry picked from commit 88c9c467a31630c558719679ca0894873a268b27)
Signed-off-by: Sean Owen <so...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6d82e0c1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6d82e0c1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6d82e0c1

Branch: refs/heads/branch-2.0
Commit: 6d82e0c1b8b4368e91aeebfc80430a61762c7e88
Parents: 8467e21
Author: dding3 <ding.d...@intel.com>
Authored: Fri May 27 21:01:50 2016 -0500
Committer: Sean Owen <so...@cloudera.com>
Committed: Fri May 27 21:01:56 2016 -0500

----------------------------------------------------------------------
 core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala   | 4 ++--
 .../scala/org/apache/spark/examples/ml/DataFrameExample.scala    | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/6d82e0c1/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
index 69ff6c7..6724af9 100644
--- a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
@@ -32,6 +32,7 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.rdd.{RDD, ReliableRDDCheckpointData}
 import org.apache.spark.shuffle.sort.SortShuffleManager
 import org.apache.spark.storage._
+import org.apache.spark.util.Utils
 
 /**
  * An abstract base class for context cleaner tests, which sets up a context with a config
@@ -206,8 +207,7 @@ class ContextCleanerSuite extends ContextCleanerSuiteBase {
   }
 
   test("automatically cleanup normal checkpoint") {
-    val checkpointDir = java.io.File.createTempFile("temp", "")
-    checkpointDir.deleteOnExit()
+    val checkpointDir = Utils.createTempDir()
     checkpointDir.delete()
     var rdd = newPairRDD()
     sc.setCheckpointDir(checkpointDir.toString)

http://git-wip-us.apache.org/repos/asf/spark/blob/6d82e0c1/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index c69027b..11faa61 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -28,6 +28,7 @@ import org.apache.spark.ml.linalg.Vector
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
 import org.apache.spark.sql.{DataFrame, Row, SparkSession}
+import org.apache.spark.util.Utils
 
 /**
  * An example of how to use [[org.apache.spark.sql.DataFrame]] for ML. Run with
@@ -86,8 +87,7 @@ object DataFrameExample {
     println(s"Selected features column with average values:\n 
${featureSummary.mean.toString}")
 
     // Save the records in a parquet file.
-    val tmpDir = Files.createTempDir()
-    tmpDir.deleteOnExit()
+    val tmpDir = Utils.createTempDir()
     val outputDir = new File(tmpDir, "dataframe").toString
     println(s"Saving to $outputDir as Parquet file.")
     df.write.parquet(outputDir)

