This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 952d476786f [MINOR][SQL][SS][DOCS] Add varargs to Dataset.observe(String, ..) with a documentation fix
952d476786f is described below

commit 952d476786ff7e2f5216094b272d46a253891358
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Wed Apr 6 17:26:17 2022 +0900

    [MINOR][SQL][SS][DOCS] Add varargs to Dataset.observe(String, ..) with a documentation fix
    
    ### What changes were proposed in this pull request?
    
    This PR proposes two minor changes:
    - Fixes the example at `Dataset.observe(String, ...)`
    - Adds `varargs` to be consistent with the other overloaded version, `Dataset.observe(Observation, ..)`; see the sketch after this list
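    
    As a minimal sketch of why `varargs` matters for Java callers (the class and names below are illustrative stand-ins, not Spark source):
    
    ```scala
    import scala.annotation.varargs
    
    // Hypothetical stand-in for Dataset.observe (String instead of Column, no
    // Spark dependency). Scala compiles `exprs: String*` to a single Seq[String]
    // parameter, which Java callers cannot supply as a plain argument list;
    // @varargs makes scalac also emit a Java-style bridge
    // `observe(String, String, String...)`.
    class Observed {
      @varargs
      def observe(name: String, expr: String, exprs: String*): Unit =
        // (expr +: exprs) mirrors how Dataset.observe combines the required
        // first expression with the optional rest.
        println(s"$name -> ${(expr +: exprs).mkString(", ")}")
    }
    
    object Demo extends App {
      new Observed().observe("my_event", "count(1) AS rc", "count(error) AS erc")
    }
    ```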
    
    ### Why are the changes needed?
    
    To provide a correct example, to properly support Java APIs via `varargs`, and for API consistency.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, the example in the documentation is fixed. Additionally, Java users should now be able to use `Dataset.observe(String, ..)` thanks to `varargs`.
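    
    As a hedged illustration (toy data, spark-shell style; not part of this patch):
    
    ```scala
    import org.apache.spark.sql.functions.{count, lit}
    import spark.implicits._  // `spark` is the session provided by spark-shell
    
    val ds = Seq(("a", 0), ("b", 1)).toDF("key", "error")
    
    // Extra metric columns flow through the Column* varargs parameter; with
    // @varargs the same call now also compiles from Java, e.g.
    //   ds.observe("my_event", count(lit(1)).as("rc"), count(col("error")).as("erc"));
    val observed = ds.observe("my_event", count(lit(1)).as("rc"), count($"error").as("erc"))
    observed.collect()
    ```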
    
    ### How was this patch tested?
    
    Manually tested. CI should verify the changes too.
    
    Closes #36084 from HyukjinKwon/minor-docs.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
    (cherry picked from commit fb3f380b3834ca24947a82cb8d87efeae6487664)
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index cb492d96406..92380f3e553 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -1928,6 +1928,7 @@ class Dataset[T] private[sql](
   * {{{
   *   // Monitor the metrics using a listener.
   *   spark.streams.addListener(new StreamingQueryListener() {
+  *     override def onQueryStarted(event: QueryStartedEvent): Unit = {}
   *     override def onQueryProgress(event: QueryProgressEvent): Unit = {
   *       event.progress.observedMetrics.asScala.get("my_event").foreach { row =>
   *         // Trigger if the number of errors exceeds 5 percent
@@ -1939,8 +1940,7 @@ class Dataset[T] private[sql](
   *         }
   *       }
   *     }
-  *     def onQueryStarted(event: QueryStartedEvent): Unit = {}
-  *     def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
+  *     override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
   *   })
   *   // Observe row count (rc) and error row count (erc) in the streaming Dataset
   *   val observed_ds = ds.observe("my_event", count(lit(1)).as("rc"), count($"error").as("erc"))
@@ -1950,6 +1950,7 @@ class Dataset[T] private[sql](
   * @group typedrel
   * @since 3.0.0
   */
+  @varargs
   def observe(name: String, expr: Column, exprs: Column*): Dataset[T] = withTypedPlan {
     CollectMetrics(name, (expr +: exprs).map(_.named), logicalPlan)
   }
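
For comparison, a hedged sketch of the other overload mentioned in the commit message, `Dataset.observe(Observation, ..)`, which is available in newer Spark releases (toy data, spark-shell style):

```scala
import org.apache.spark.sql.Observation
import org.apache.spark.sql.functions.{count, lit}
import spark.implicits._  // `spark` is the session provided by spark-shell

val ds = Seq(("a", 0), ("b", 1)).toDF("key", "error")

// For a batch query, Observation hands the metrics back directly instead of
// going through a StreamingQueryListener; `get` blocks until an action has
// materialized the Dataset.
val observation = Observation("my_event")
val observed = ds.observe(observation, count(lit(1)).as("rc"), count($"error").as("erc"))
observed.collect()
println(observation.get)  // e.g. Map(rc -> 2, erc -> 2)
```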


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
