Repository: spark
Updated Branches:
  refs/heads/branch-2.0 0b0b5fe54 -> 363db9f8b


[SPARK-16050][TESTS] Remove the flaky test: ConsoleSinkSuite

## What changes were proposed in this pull request?

ConsoleSinkSuite just captures the content written to stdout and compares it 
with an expected string. However, because Spark may not stop all of its 
background threads immediately, there is a race condition: other threads may 
still be writing log output to **stdout** while ConsoleSinkSuite is running, 
which makes the comparison, and hence the test, fail.
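
For illustration, here is a minimal, self-contained sketch of the racy 
pattern (not Spark code); the `backgroundLogger` thread is a hypothetical 
stand-in for any Spark thread that has not stopped yet:

```scala
import java.io.{ByteArrayOutputStream, PrintStream}

// A minimal sketch of the racy stdout-capture pattern (not Spark code).
object StdoutRaceSketch extends App {
  val captured = new ByteArrayOutputStream()
  val printStream = new PrintStream(captured)
  Console.withOut(printStream) {
    // Hypothetical stand-in for a Spark background thread that has not
    // stopped yet and is still logging; threads created inside withOut
    // inherit the redirected stream.
    val backgroundLogger = new Thread(new Runnable {
      def run(): Unit = println("INFO leftover log line")
    })
    backgroundLogger.start()
    println("expected output") // the output the test actually wants to verify
    backgroundLogger.join()
  }
  printStream.flush()
  // The capture now also contains the stray log line, in a nondeterministic
  // order relative to the expected output. In the real suite such stray
  // output appeared intermittently, making an exact string comparison flaky.
  println(s"captured:\n${captured.toString}")
}
```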

Therefore, I just deleted `ConsoleSinkSuite`. If we want to test this 
behavior in the future, we should refactor ConsoleSink to make it testable 
instead of depending on stdout (see the sketch below); as it stands, the 
test is unreliable, so it is simply removed.
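
For reference, a minimal sketch of that idea, assuming a hypothetical sink 
whose output stream is injectable (this is not the real ConsoleSink API): a 
test can then hand the sink its own buffer instead of hooking the global 
`System.out`.

```scala
import java.io.{ByteArrayOutputStream, PrintStream}

// Hypothetical sketch: the output stream is injected instead of being
// hard-coded to System.out. `addBatch` and the banner format loosely
// mirror what the console sink prints; none of this is the real API.
class InjectableConsoleSink(out: PrintStream = System.out) {
  def addBatch(batchId: Long, rows: Seq[String]): Unit = {
    out.println("-------------------------------------------")
    out.println(s"Batch: $batchId")
    out.println("-------------------------------------------")
    rows.foreach(out.println)
  }
}

object InjectableConsoleSinkExample extends App {
  val buffer = new ByteArrayOutputStream()
  val sink = new InjectableConsoleSink(new PrintStream(buffer, true))
  sink.addBatch(0, Seq("|    a|    1|"))
  // Deterministic: only this sink writes to the buffer, so no other
  // thread's output can pollute the assertion.
  assert(buffer.toString.contains("Batch: 0"))
}
```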

## How was this patch tested?

Just removed a flaky test.

Author: Shixiong Zhu <shixi...@databricks.com>

Closes #13776 from zsxwing/SPARK-16050.

(cherry picked from commit 5cfabec8724714b897d6e23e670c39e58f554ea2)
Signed-off-by: Michael Armbrust <mich...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/363db9f8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/363db9f8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/363db9f8

Branch: refs/heads/branch-2.0
Commit: 363db9f8be53773238854ab16c3459ba46a6961b
Parents: 0b0b5fe
Author: Shixiong Zhu <shixi...@databricks.com>
Authored: Mon Jun 20 10:35:37 2016 -0700
Committer: Michael Armbrust <mich...@databricks.com>
Committed: Mon Jun 20 10:35:49 2016 -0700

----------------------------------------------------------------------
 .../execution/streaming/ConsoleSinkSuite.scala  | 99 --------------------
 1 file changed, 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/363db9f8/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ConsoleSinkSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ConsoleSinkSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ConsoleSinkSuite.scala
deleted file mode 100644
index e853d8c..0000000
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ConsoleSinkSuite.scala
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql.execution.streaming
-
-import java.io.{ByteArrayOutputStream, PrintStream}
-import java.nio.charset.StandardCharsets.UTF_8
-
-import org.scalatest.BeforeAndAfter
-
-import org.apache.spark.sql.streaming.StreamTest
-
-class ConsoleSinkSuite extends StreamTest with BeforeAndAfter {
-
-  import testImplicits._
-
-  after {
-    sqlContext.streams.active.foreach(_.stop())
-  }
-
-  test("SPARK-16020 Complete mode aggregation with console sink") {
-    withTempDir { checkpointLocation =>
-      val origOut = System.out
-      val stdout = new ByteArrayOutputStream()
-      try {
-        // Hook Java System.out.println
-        System.setOut(new PrintStream(stdout))
-        // Hook Scala println
-        Console.withOut(stdout) {
-          val input = MemoryStream[String]
-          val df = input.toDF().groupBy("value").count()
-          val query = df.writeStream
-            .format("console")
-            .outputMode("complete")
-            .option("checkpointLocation", checkpointLocation.getAbsolutePath)
-            .start()
-          input.addData("a")
-          query.processAllAvailable()
-          input.addData("a", "b")
-          query.processAllAvailable()
-          input.addData("a", "b", "c")
-          query.processAllAvailable()
-          query.stop()
-        }
-        System.out.flush()
-      } finally {
-        System.setOut(origOut)
-      }
-
-      val expected = """-------------------------------------------
-        |Batch: 0
-        |-------------------------------------------
-        |+-----+-----+
-        ||value|count|
-        |+-----+-----+
-        ||    a|    1|
-        |+-----+-----+
-        |
-        |-------------------------------------------
-        |Batch: 1
-        |-------------------------------------------
-        |+-----+-----+
-        ||value|count|
-        |+-----+-----+
-        ||    a|    2|
-        ||    b|    1|
-        |+-----+-----+
-        |
-        |-------------------------------------------
-        |Batch: 2
-        |-------------------------------------------
-        |+-----+-----+
-        ||value|count|
-        |+-----+-----+
-        ||    a|    3|
-        ||    b|    2|
-        ||    c|    1|
-        |+-----+-----+
-        |
-        |""".stripMargin
-      assert(expected === new String(stdout.toByteArray, UTF_8))
-    }
-  }
-
-}

