Repository: spark
Updated Branches:
  refs/heads/master ecc563135 -> 0a4b7e4f8


[MINOR] Fix some typos in the documentation

## What changes were proposed in this pull request?

Fix some typos in the documentation.

## How was this patch tested?

Existing tests.

Please review http://spark.apache.org/contributing.html before opening a pull 
request.

Author: Xianyang Liu <[email protected]>

Closes #18350 from ConeyLiu/fixtypo.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/0a4b7e4f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/0a4b7e4f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/0a4b7e4f

Branch: refs/heads/master
Commit: 0a4b7e4f81109cff651d2afb94f9f8bf734abdeb
Parents: ecc5631
Author: Xianyang Liu <[email protected]>
Authored: Mon Jun 19 20:35:58 2017 +0100
Committer: Sean Owen <[email protected]>
Committed: Mon Jun 19 20:35:58 2017 +0100

----------------------------------------------------------------------
 dev/change-version-to-2.10.sh                                      | 2 +-
 dev/change-version-to-2.11.sh                                      | 2 +-
 python/pyspark/__init__.py                                         | 2 +-
 .../org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala  | 2 +-
 .../org/apache/spark/sql/execution/streaming/BatchCommitLog.scala  | 2 +-
 .../test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala  | 2 +-
 .../spark/sql/execution/datasources/FileSourceStrategySuite.scala  | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/dev/change-version-to-2.10.sh
----------------------------------------------------------------------
diff --git a/dev/change-version-to-2.10.sh b/dev/change-version-to-2.10.sh
index 0962d34..b718d94 100755
--- a/dev/change-version-to-2.10.sh
+++ b/dev/change-version-to-2.10.sh
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-# This script exists for backwards compability. Use change-scala-version.sh 
instead.
+# This script exists for backwards compatibility. Use change-scala-version.sh 
instead.
 echo "This script is deprecated. Please instead run: change-scala-version.sh 
2.10"
 
 $(dirname $0)/change-scala-version.sh 2.10

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/dev/change-version-to-2.11.sh
----------------------------------------------------------------------
diff --git a/dev/change-version-to-2.11.sh b/dev/change-version-to-2.11.sh
index 4ccfeef..9308795 100755
--- a/dev/change-version-to-2.11.sh
+++ b/dev/change-version-to-2.11.sh
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-# This script exists for backwards compability. Use change-scala-version.sh 
instead.
+# This script exists for backwards compatibility. Use change-scala-version.sh 
instead.
 echo "This script is deprecated. Please instead run: change-scala-version.sh 
2.11"
 
 $(dirname $0)/change-scala-version.sh 2.11

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/python/pyspark/__init__.py
----------------------------------------------------------------------
diff --git a/python/pyspark/__init__.py b/python/pyspark/__init__.py
index 14c51a3..4d142c9 100644
--- a/python/pyspark/__init__.py
+++ b/python/pyspark/__init__.py
@@ -35,7 +35,7 @@ Public classes:
   - :class:`StorageLevel`:
       Finer-grained cache persistence levels.
   - :class:`TaskContext`:
-      Information about the current running task, avaialble on the workers and 
experimental.
+      Information about the current running task, available on the workers and 
experimental.
 
 """
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala
index f93e573..ede0b16 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala
@@ -39,7 +39,7 @@ object ExpressionSet {
  * guaranteed to see at least one such expression.  For example:
  *
  * {{{
- *   val set = AttributeSet(a + 1, 1 + a)
+ *   val set = ExpressionSet(a + 1, 1 + a)
  *
  *   set.iterator => Iterator(a + 1)
  *   set.contains(a + 1) => true

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/BatchCommitLog.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/BatchCommitLog.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/BatchCommitLog.scala
index a34938f..5e24e8f 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/BatchCommitLog.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/BatchCommitLog.scala
@@ -33,7 +33,7 @@ import org.apache.spark.sql.SparkSession
  * - process batch 1
  * - write batch 1 to completion log
  * - trigger batch 2
- * - obtain bactch 2 offsets and write to offset log
+ * - obtain batch 2 offsets and write to offset log
  * - process batch 2
  * - write batch 2 to completion log
  * ....

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
index 8569c2d..5db354d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
@@ -507,7 +507,7 @@ class DataFrameAggregateSuite extends QueryTest with 
SharedSQLContext {
       Row(2.0) :: Row(2.0) :: Row(2.0) :: Nil)
   }
 
-  test("SQL decimal test (used for catching certain demical handling bugs in 
aggregates)") {
+  test("SQL decimal test (used for catching certain decimal handling bugs in 
aggregates)") {
     checkAnswer(
       decimalData.groupBy('a cast DecimalType(10, 2)).agg(avg('b cast 
DecimalType(10, 2))),
       Seq(Row(new java.math.BigDecimal(1.0), new java.math.BigDecimal(1.5)),

http://git-wip-us.apache.org/repos/asf/spark/blob/0a4b7e4f/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index 9a2dcaf..d77f0c2 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -244,7 +244,7 @@ class FileSourceStrategySuite extends QueryTest with 
SharedSQLContext with Predi
     val df2 = table.where("(p1 + c2) = 2 AND c1 = 1")
     // Filter on data only are advisory so we have to reevaluate.
     assert(getPhysicalFilters(df2) contains resolve(df2, "c1 = 1"))
-    // Need to evalaute filters that are not pushed down.
+    // Need to evaluate filters that are not pushed down.
     assert(getPhysicalFilters(df2) contains resolve(df2, "(p1 + c2) = 2"))
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to