Repository: spark
Updated Branches:
  refs/heads/master 0b5917000 -> c0d1bf032


[MINOR] Fix typos and misspellings

## What changes were proposed in this pull request?

Fix typos and misspellings, per 
https://github.com/apache/spark-website/pull/158#issuecomment-435790366

## How was this patch tested?

Existing tests.

Closes #22950 from srowen/Typos.

Authored-by: Sean Owen <sean.o...@databricks.com>
Signed-off-by: Sean Owen <sean.o...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/c0d1bf03
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/c0d1bf03
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/c0d1bf03

Branch: refs/heads/master
Commit: c0d1bf0322be12230c30cb200f19a02e4d5e0d49
Parents: 0b59170
Author: Sean Owen <sean.o...@databricks.com>
Authored: Mon Nov 5 17:34:23 2018 -0600
Committer: Sean Owen <sean.o...@databricks.com>
Committed: Mon Nov 5 17:34:23 2018 -0600

----------------------------------------------------------------------
 .../main/java/org/apache/spark/ExecutorPlugin.java |  6 +++---
 .../java/org/apache/spark/ExecutorPluginSuite.java |  4 ++--
 docs/sql-migration-guide-upgrade.md                |  2 +-
 .../spark/ml/r/AFTSurvivalRegressionWrapper.scala  |  6 +++---
 .../org/apache/spark/ml/stat/Summarizer.scala      |  4 ++--
 .../mllib/stat/MultivariateOnlineSummarizer.scala  |  2 +-
 python/pyspark/ml/stat.py                          |  2 +-
 .../apache/spark/sql/hive/CachedTableSuite.scala   | 17 ++++++++---------
 8 files changed, 21 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/core/src/main/java/org/apache/spark/ExecutorPlugin.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/ExecutorPlugin.java b/core/src/main/java/org/apache/spark/ExecutorPlugin.java
index ec0b57f..f86520c 100644
--- a/core/src/main/java/org/apache/spark/ExecutorPlugin.java
+++ b/core/src/main/java/org/apache/spark/ExecutorPlugin.java
@@ -20,18 +20,18 @@ package org.apache.spark;
 import org.apache.spark.annotation.DeveloperApi;
 
 /**
- * A plugin which can be automaticaly instantiated within each Spark executor.  Users can specify
+ * A plugin which can be automatically instantiated within each Spark executor.  Users can specify
  * plugins which should be created with the "spark.executor.plugins" configuration.  An instance
  * of each plugin will be created for every executor, including those created by dynamic allocation,
  * before the executor starts running any tasks.
  *
  * The specific api exposed to the end users still considered to be very unstable.  We will
- * hopefully be able to keep compatability by providing default implementations for any methods
+ * hopefully be able to keep compatibility by providing default implementations for any methods
  * added, but make no guarantees this will always be possible across all Spark releases.
  *
  * Spark does nothing to verify the plugin is doing legitimate things, or to manage the resources
  * it uses.  A plugin acquires the same privileges as the user running the task.  A bad plugin
- * could also intefere with task execution and make the executor fail in unexpected ways.
+ * could also interfere with task execution and make the executor fail in unexpected ways.
  */
 @DeveloperApi
 public interface ExecutorPlugin {
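
As the comment above describes, an instance of every class listed in the `spark.executor.plugins` configuration is created on each executor before any task runs. A minimal sketch in Scala of wiring that up; the `com.example.AuditPlugin` name is hypothetical, and the empty class body assumes the interface's methods all have default implementations, as the comment suggests:

```scala
// Hypothetical plugin: assuming the interface's methods have default
// implementations (per the comment above), an empty class is a valid no-op plugin.
package com.example

import org.apache.spark.{ExecutorPlugin, SparkConf}

class AuditPlugin extends ExecutorPlugin

object AuditPluginDemo {
  // Register the plugin: one instance is created on every executor, including
  // executors added later by dynamic allocation, before any task runs.
  def pluginConf(): SparkConf =
    new SparkConf()
      .setAppName("executor-plugin-demo")
      .set("spark.executor.plugins", "com.example.AuditPlugin")
}
```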

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java b/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java
index 686eb28..80cd702 100644
--- a/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java
+++ b/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java
@@ -63,10 +63,10 @@ public class ExecutorPluginSuite {
 
   @Test
   public void testPluginClassDoesNotExist() {
-    SparkConf conf = initializeSparkConf("nonexistant.plugin");
+    SparkConf conf = initializeSparkConf("nonexistent.plugin");
     try {
       sc = new JavaSparkContext(conf);
-      fail("No exception thrown for nonexistant plugin");
+      fail("No exception thrown for nonexistent plugin");
     } catch (Exception e) {
       // We cannot catch ClassNotFoundException directly because Java doesn't think it'll be thrown
       assertTrue(e.toString().startsWith("java.lang.ClassNotFoundException"));

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/docs/sql-migration-guide-upgrade.md
----------------------------------------------------------------------
diff --git a/docs/sql-migration-guide-upgrade.md b/docs/sql-migration-guide-upgrade.md
index c9685b8..50458e9 100644
--- a/docs/sql-migration-guide-upgrade.md
+++ b/docs/sql-migration-guide-upgrade.md
@@ -117,7 +117,7 @@ displayTitle: Spark SQL Upgrading Guide
 
   - Since Spark 2.4, Metadata files (e.g. Parquet summary files) and temporary files are not counted as data files when calculating table size during Statistics computation.
 
-  - Since Spark 2.4, empty strings are saved as quoted empty strings `""`. In version 2.3 and earlier, empty strings are equal to `null` values and do not reflect to any characters in saved CSV files. For example, the row of `"a", null, "", 1` was writted as `a,,,1`. Since Spark 2.4, the same row is saved as `a,,"",1`. To restore the previous behavior, set the CSV option `emptyValue` to empty (not quoted) string.
+  - Since Spark 2.4, empty strings are saved as quoted empty strings `""`. In version 2.3 and earlier, empty strings are equal to `null` values and do not reflect to any characters in saved CSV files. For example, the row of `"a", null, "", 1` was written as `a,,,1`. Since Spark 2.4, the same row is saved as `a,,"",1`. To restore the previous behavior, set the CSV option `emptyValue` to empty (not quoted) string.
 
   - Since Spark 2.4, The LOAD DATA command supports wildcard `?` and `*`, which match any one character, and zero or more characters, respectively. Example: `LOAD DATA INPATH '/tmp/folder*/'` or `LOAD DATA INPATH '/tmp/part-?'`. Special Characters like `space` also now work in paths. Example: `LOAD DATA INPATH '/tmp/folder name/'`.
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala
index 48485e0..1b5f77a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala
@@ -62,7 +62,7 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg
   private val FORMULA_REGEXP = """Surv\(([^,]+), ([^,]+)\) ~ (.+)""".r
 
   private def formulaRewrite(formula: String): (String, String) = {
-    var rewritedFormula: String = null
+    var rewrittenFormula: String = null
     var censorCol: String = null
     try {
       val FORMULA_REGEXP(label, censor, features) = formula
@@ -71,14 +71,14 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg
         throw new UnsupportedOperationException(
           "Terms of survreg formula can not support dot operator.")
       }
-      rewritedFormula = label.trim + "~" + features.trim
+      rewrittenFormula = label.trim + "~" + features.trim
       censorCol = censor.trim
     } catch {
       case e: MatchError =>
         throw new SparkException(s"Could not parse formula: $formula")
     }
 
-    (rewritedFormula, censorCol)
+    (rewrittenFormula, censorCol)
   }
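
The rewrite above can be exercised on its own with the same regular expression; the input formula below is an arbitrary example:

```scala
// A standalone sketch of the formula rewrite performed above, using the same
// regex; "Surv(time, status) ~ x1 + x2" is an arbitrary example input.
val FORMULA_REGEXP = """Surv\(([^,]+), ([^,]+)\) ~ (.+)""".r

val FORMULA_REGEXP(label, censor, features) = "Surv(time, status) ~ x1 + x2"
val rewrittenFormula = label.trim + "~" + features.trim   // "time~x1 + x2"
val censorCol = censor.trim                               // "status"
```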
 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala b/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
index d40827e..ed7d7e0 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
@@ -96,7 +96,7 @@ object Summarizer extends Logging {
    *  - numNonzeros: a vector with the number of non-zeros for each coefficients
    *  - max: the maximum for each coefficient.
    *  - min: the minimum for each coefficient.
-   *  - normL2: the Euclidian norm for each coefficient.
+   *  - normL2: the Euclidean norm for each coefficient.
    *  - normL1: the L1 norm of each coefficient (sum of the absolute values).
    * @param metrics metrics that can be provided.
    * @return a builder.
@@ -536,7 +536,7 @@ private[ml] object SummaryBuilderImpl extends Logging {
     }
 
     /**
-     * L2 (Euclidian) norm of each dimension.
+     * L2 (Euclidean) norm of each dimension.
      */
     def normL2: Vector = {
       require(requestedMetrics.contains(NormL2))
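
The metrics documented above, including the normL2 (Euclidean norm), are requested through the Summarizer builder; a short sketch, assuming an existing SparkSession named `spark` and arbitrary feature vectors and weights:

```scala
// A hedged sketch of requesting normL2 (Euclidean norm) and normL1 through the
// Summarizer builder documented above. Assumes a SparkSession named `spark`;
// the feature vectors and weights are arbitrary.
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.stat.Summarizer
import spark.implicits._

val df = Seq(
  (Vectors.dense(1.0, 2.0), 1.0),
  (Vectors.dense(3.0, 4.0), 2.0)
).toDF("features", "weight")

df.select(Summarizer.metrics("normL2", "normL1").summary($"features", $"weight").as("stats"))
  .show(truncate = false)
```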

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
index 8121880..0554b6d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala
@@ -273,7 +273,7 @@ class MultivariateOnlineSummarizer extends MultivariateStatisticalSummary with S
   }
 
   /**
-   * L2 (Euclidian) norm of each dimension.
+   * L2 (Euclidean) norm of each dimension.
    *
    */
   @Since("1.2.0")

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/python/pyspark/ml/stat.py
----------------------------------------------------------------------
diff --git a/python/pyspark/ml/stat.py b/python/pyspark/ml/stat.py
index 370154f..3f42102 100644
--- a/python/pyspark/ml/stat.py
+++ b/python/pyspark/ml/stat.py
@@ -336,7 +336,7 @@ class Summarizer(object):
          - numNonzeros: a vector with the number of non-zeros for each coefficients
          - max: the maximum for each coefficient.
          - min: the minimum for each coefficient.
-         - normL2: the Euclidian norm for each coefficient.
+         - normL2: the Euclidean norm for each coefficient.
          - normL1: the L1 norm of each coefficient (sum of the absolute values).
 
         :param metrics:

http://git-wip-us.apache.org/repos/asf/spark/blob/c0d1bf03/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 569f00c..b492f39 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -20,7 +20,6 @@ package org.apache.spark.sql.hive
 import java.io.File
 
 import org.apache.spark.sql.{AnalysisException, Dataset, QueryTest, SaveMode}
-import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
 import org.apache.spark.sql.execution.datasources.{CatalogFileIndex, HadoopFsRelation, LogicalRelation}
@@ -97,24 +96,24 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto
     }
   }
 
-  test("DROP nonexistant table") {
-    sql("DROP TABLE IF EXISTS nonexistantTable")
+  test("DROP nonexistent table") {
+    sql("DROP TABLE IF EXISTS nonexistentTable")
   }
 
-  test("uncache of nonexistant tables") {
-    val expectedErrorMsg = "Table or view not found: nonexistantTable"
+  test("uncache of nonexistent tables") {
+    val expectedErrorMsg = "Table or view not found: nonexistentTable"
     // make sure table doesn't exist
-    var e = intercept[AnalysisException](spark.table("nonexistantTable")).getMessage
+    var e = intercept[AnalysisException](spark.table("nonexistentTable")).getMessage
     assert(e.contains(expectedErrorMsg))
     e = intercept[AnalysisException] {
-      spark.catalog.uncacheTable("nonexistantTable")
+      spark.catalog.uncacheTable("nonexistentTable")
     }.getMessage
     assert(e.contains(expectedErrorMsg))
     e = intercept[AnalysisException] {
-      sql("UNCACHE TABLE nonexistantTable")
+      sql("UNCACHE TABLE nonexistentTable")
     }.getMessage
     assert(e.contains(expectedErrorMsg))
-    sql("UNCACHE TABLE IF EXISTS nonexistantTable")
+    sql("UNCACHE TABLE IF EXISTS nonexistentTable")
   }
 
   test("no error on uncache of non-cached table") {

