This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 921fb289f00 [MINOR][DOCS] fix: some minor typos
921fb289f00 is described below

commit 921fb289f003317d89120faa6937e4abd359195c
Author: Eric Blanco <ericjoel.blancoherm...@telefonica.com>
AuthorDate: Thu Jul 27 08:53:54 2023 -0500

    [MINOR][DOCS] fix: some minor typos
    
    ### What changes were proposed in this pull request?
    Change `the the` to `the`
    
    ### Why are the changes needed?
    To fix the typo
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    
    Closes #42188 from ejblanco/docs/spark-typos.
    
    Authored-by: Eric Blanco <ericjoel.blancoherm...@telefonica.com>
    Signed-off-by: Sean Owen <sro...@gmail.com>
---
 .../spark/sql/connect/service/SparkConnectStreamingQueryCache.scala     | 2 +-
 .../org/apache/spark/ui/static/vis-timeline-graph2d.min.js.map          | 2 +-
 dev/connect-jvm-client-mima-check                                       | 2 +-
 .../main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala    | 2 +-
 .../scala/org/apache/spark/sql/catalyst/expressions/WindowTime.scala    | 2 +-
 sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamingQueryCache.scala b/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamingQueryCache.scala
index 133686df018..87004242da9 100644
--- a/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamingQueryCache.scala
+++ b/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamingQueryCache.scala
@@ -84,7 +84,7 @@ private[connect] class SparkConnectStreamingQueryCache(
 
   /**
    * Returns [[StreamingQuery]] if it is cached and session matches the cached query. It ensures
-   * the the session associated with it matches the session passed into the call. If the query is
+   * the session associated with it matches the session passed into the call. If the query is
    * inactive (i.e. it has a cache expiry time set), this access extends its expiry time. So if a
    * client keeps accessing a query, it stays in the cache.
    */
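
Context for the hunk above: the comment describes a cache whose inactive
entries get their expiry extended on every successful access. Below is a
minimal Scala sketch of that access pattern, using a simplified,
hypothetical Entry shape; the real SparkConnectStreamingQueryCache is
more involved.

    import scala.collection.mutable

    // Illustrative shapes only; not the actual Spark Connect types.
    case class Entry[Q](query: Q, sessionId: String, var expiresAtMs: Option[Long])

    class ExpiringCache[Q](ttlMs: Long) {
      private val entries = mutable.Map.empty[String, Entry[Q]]

      // Return the query only when the caller's session matches the cached one.
      // If the entry is inactive (expiry time set), the access pushes the expiry
      // forward, so a client that keeps accessing a query keeps it in the cache.
      def get(queryId: String, sessionId: String): Option[Q] = synchronized {
        entries.get(queryId).collect {
          case e if e.sessionId == sessionId =>
            e.expiresAtMs = e.expiresAtMs.map(_ => System.currentTimeMillis() + ttlMs)
            e.query
        }
      }
    }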
diff --git a/core/src/main/resources/org/apache/spark/ui/static/vis-timeline-graph2d.min.js.map b/core/src/main/resources/org/apache/spark/ui/static/vis-timeline-graph2d.min.js.map
index 95fdc523cf4..250b375e545 100644
--- a/core/src/main/resources/org/apache/spark/ui/static/vis-timeline-graph2d.min.js.map
+++ b/core/src/main/resources/org/apache/spark/ui/static/vis-timeline-graph2d.min.js.map
@@ -1 +1 @@
-{"version":3,"file":"vis-timeline-graph2d.min.js","sources":["../../node_modules/moment/locale/de.js","../../node_modules/moment/moment.js","../../node_modules/moment/locale/es.js","../../node_modules/moment/locale/fr.js","../../node_modules/moment/locale/it.js","../../node_modules/moment/locale/ja.js","../../node_modules/moment/locale/nl.js","../../node_modules/moment/locale/pl.js","../../node_modules/moment/locale/ru.js","../../node_modules/moment/locale/uk.js","../../node_modules/core
 [...]
\ No newline at end of file
+{"version":3,"file":"vis-timeline-graph2d.min.js","sources":["../../node_modules/moment/locale/de.js","../../node_modules/moment/moment.js","../../node_modules/moment/locale/es.js","../../node_modules/moment/locale/fr.js","../../node_modules/moment/locale/it.js","../../node_modules/moment/locale/ja.js","../../node_modules/moment/locale/nl.js","../../node_modules/moment/locale/pl.js","../../node_modules/moment/locale/ru.js","../../node_modules/moment/locale/uk.js","../../node_modules/core
 [...]
\ No newline at end of file
diff --git a/dev/connect-jvm-client-mima-check b/dev/connect-jvm-client-mima-check
index ac4b95935b9..6a29cbf08ce 100755
--- a/dev/connect-jvm-client-mima-check
+++ b/dev/connect-jvm-client-mima-check
@@ -52,7 +52,7 @@ echo "finish connect-client-jvm module mima check ..."
 
 RESULT_SIZE=$(wc -l .connect-mima-check-result | awk '{print $1}')
 
-# The the file has no content if check passed.
+# The file has no content if check passed.
 if [[ $RESULT_SIZE -eq "0" ]]; then
   ERRORS=""
 else
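
The convention the script relies on: the MiMa check writes any reported
incompatibilities to .connect-mima-check-result, so an empty result file
means the check passed. The same convention sketched in Scala (file name
taken from the script; this is illustrative, not part of the build):

    import java.nio.file.{Files, Paths}

    // Empty result file => no incompatibilities reported => check passed.
    val result = Paths.get(".connect-mima-check-result")
    val errors =
      if (Files.exists(result)) new String(Files.readAllBytes(result)).trim else ""
    if (errors.isEmpty) println("mima check passed") else println(errors)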
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 3ece74a4d18..92e550ea941 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -2623,7 +2623,7 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
               withOrigin(t.origin)(t.copy(hasTried = true))
             } else {
               // This is a nested column, we still have a chance to match grouping expressions with
-              // the the top-levle column. Here we wrap the underlying `Attribute` with
+              // the top-level column. Here we wrap the underlying `Attribute` with
               // `TempResolvedColumn` and try again.
               val childWithTempCol = t.child.transformUp {
                 case a: Attribute => TempResolvedColumn(a, Seq(a.name))
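
For context on the hunk above: the comment concerns resolving grouping
expressions against nested (struct) fields. A hypothetical query shape
that exercises this path, assuming a table t with a struct column s
(all names invented for illustration):

    // Grouping on a nested field: `s.a` is not a top-level column, so the
    // analyzer wraps the underlying `Attribute` in `TempResolvedColumn`
    // (as in the code above) and tries the match again.
    spark.sql("SELECT s.a, count(*) FROM t GROUP BY s.a")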
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/WindowTime.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/WindowTime.scala
index 59b5ca8f2bd..07d26fb33cb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/WindowTime.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/WindowTime.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.types._
 @ExpressionDescription(
   usage = """
     _FUNC_(window_column) - Extract the time value from time/session window column which can be used for event time value of window.
-      The extracted time is (window.end - 1) which reflects the fact that the the aggregating
+      The extracted time is (window.end - 1) which reflects the fact that the aggregating
       windows have exclusive upper bound - [start, end)
       See <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#window-operations-on-event-time">'Window Operations on Event Time'</a> in Structured Streaming guide doc for detailed explanation and examples.
   """,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index fa08bd7ed3b..af2ec777d6b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -4166,7 +4166,7 @@ object SQLConf {
   val LEGACY_EMPTY_CURRENT_DB_IN_CLI =
     buildConf("spark.sql.legacy.emptyCurrentDBInCli")
       .internal()
-      .doc("When false, spark-sql CLI prints the the current database in 
prompt")
+      .doc("When false, spark-sql CLI prints the current database in prompt.")
       .version("3.4.0")
       .booleanConf
       .createWithDefault(false)
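
For illustration, a hedged sketch of reading this internal flag from a
session, assuming the documented semantics and the default (false) when it
has not been set explicitly:

    // false (default): the spark-sql CLI prompt shows the current database;
    // true: legacy behavior, the prompt omits it.
    val legacyEmptyDb =
      spark.conf.get("spark.sql.legacy.emptyCurrentDBInCli", "false").toBoolean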

