Repository: spark
Updated Branches:
  refs/heads/master 6f3cd36f9 -> 929cb8bee


[MINOR][SQL] Fix some typos in comments and test hints

## What changes were proposed in this pull request?

Fix some typos in comments and test hints

## How was this patch tested?

N/A.

Author: Sean Zhong <seanzh...@databricks.com>

Closes #14755 from clockfly/fix_minor_typo.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/929cb8be
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/929cb8be
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/929cb8be

Branch: refs/heads/master
Commit: 929cb8beed9b7014231580cc002853236a5337d6
Parents: 6f3cd36
Author: Sean Zhong <seanzh...@databricks.com>
Authored: Mon Aug 22 13:31:38 2016 -0700
Committer: Yin Huai <yh...@databricks.com>
Committed: Mon Aug 22 13:31:38 2016 -0700

----------------------------------------------------------------------
 .../org/apache/spark/sql/execution/UnsafeKVExternalSorter.java | 2 +-
 .../sql/execution/aggregate/TungstenAggregationIterator.scala  | 6 +++---
 sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala   | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/929cb8be/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
index eb105bd..0d51dc9 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
@@ -99,7 +99,7 @@ public final class UnsafeKVExternalSorter {
       // The array will be used to do in-place sort, which require half of the space to be empty.
       assert(map.numKeys() <= map.getArray().size() / 2);
       // During spilling, the array in map will not be used, so we can borrow that and use it
-      // as the underline array for in-memory sorter (it's always large enough).
+      // as the underlying array for in-memory sorter (it's always large enough).
       // Since we will not grow the array, it's fine to pass `null` as consumer.
       final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter(
         null, taskMemoryManager, recordComparator, prefixComparator, map.getArray(),

http://git-wip-us.apache.org/repos/asf/spark/blob/929cb8be/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
index 4b8adf5..4e072a9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
@@ -32,9 +32,9 @@ import org.apache.spark.unsafe.KVIterator
  * An iterator used to evaluate aggregate functions. It operates on [[UnsafeRow]]s.
  *
  * This iterator first uses hash-based aggregation to process input rows. It uses
- * a hash map to store groups and their corresponding aggregation buffers. If we
- * this map cannot allocate memory from memory manager, it spill the map into disk
- * and create a new one. After processed all the input, then merge all the spills
+ * a hash map to store groups and their corresponding aggregation buffers. If
+ * this map cannot allocate memory from memory manager, it spills the map into disk
+ * and creates a new one. After processed all the input, then merge all the spills
  * together using external sorter, and do sort-based aggregation.
  *
  * The process has the following step:

http://git-wip-us.apache.org/repos/asf/spark/blob/929cb8be/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index 484e438..c7af402 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -358,11 +358,11 @@ abstract class QueryTest extends PlanTest {
    */
   def assertEmptyMissingInput(query: Dataset[_]): Unit = {
     assert(query.queryExecution.analyzed.missingInput.isEmpty,
-      s"The analyzed logical plan has missing inputs: ${query.queryExecution.analyzed}")
+      s"The analyzed logical plan has missing inputs:\n${query.queryExecution.analyzed}")
     assert(query.queryExecution.optimizedPlan.missingInput.isEmpty,
-      s"The optimized logical plan has missing inputs: ${query.queryExecution.optimizedPlan}")
+      s"The optimized logical plan has missing inputs:\n${query.queryExecution.optimizedPlan}")
     assert(query.queryExecution.executedPlan.missingInput.isEmpty,
-      s"The physical plan has missing inputs: ${query.queryExecution.executedPlan}")
+      s"The physical plan has missing inputs:\n${query.queryExecution.executedPlan}")
   }
 }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to