Posted to commits@spark.apache.org by gu...@apache.org on 2022/04/06 08:26:38 UTC

[spark] branch branch-3.3 updated: [MINOR][SQL][SS][DOCS] Add varargs to Dataset.observe(String, ..) with a documentation fix

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new fa9cbe21889 [MINOR][SQL][SS][DOCS] Add varargs to Dataset.observe(String, ..) with a documentation fix
fa9cbe21889 is described below

commit fa9cbe21889a3d032687a152ec795ce1dd2db0ff
Author: Hyukjin Kwon <gu...@apache.org>
AuthorDate: Wed Apr 6 17:26:17 2022 +0900

    [MINOR][SQL][SS][DOCS] Add varargs to Dataset.observe(String, ..) with a documentation fix
    
    ### What changes were proposed in this pull request?
    
    This PR proposes two minor changes:
    - Fixes the example in the `Dataset.observe(String, ...)` documentation
    - Adds `@varargs` for consistency with the other overloaded version, `Dataset.observe(Observation, ..)` (see the sketch below)
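
    For reference, a minimal sketch of how the `Observation`-based overload is used in a batch query (assuming a `SparkSession` in scope and an input `Dataset` named `ds`; the metric names are illustrative, not from this commit):

        import org.apache.spark.sql.Observation
        import org.apache.spark.sql.functions._

        // Register named metrics on the Dataset; the Observation handle
        // collects them once an action runs. Note this overload does not
        // support streaming Datasets.
        val observation = Observation("my_metrics")
        val observed = ds.observe(observation, count(lit(1)).as("rc"))

        observed.collect()            // any action completes the observation
        val metrics = observation.get // Map[String, Any], e.g. metrics("rc")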
    
    ### Why are the changes needed?
    
    To provide a correct example, to support the Java API properly via `@varargs`, and to keep the two overloads consistent.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, the example in the documentation is fixed. Additionally, Java users can now call `Dataset.observe(String, ..)` directly thanks to the `@varargs` annotation.
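
    To illustrate why the annotation matters (a minimal sketch with hypothetical names, not code from this commit): Scala compiles a `String*` parameter to a single `Seq[String]` argument in bytecode, so Java callers would otherwise have to construct a Scala `Seq` by hand. `scala.annotation.varargs` makes the compiler emit an additional Java-friendly bridge method:

        import scala.annotation.varargs

        class Greeter {
          // Scala compiles `rest: String*` to greet(String, Seq<String>).
          // @varargs additionally emits a bridge greet(String, String...)
          // that Java code can call directly.
          @varargs
          def greet(first: String, rest: String*): String =
            (first +: rest).mkString(" ")
        }

        // From Java:
        //   new Greeter().greet("hello", "spark");   // needs @varargs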
    
    ### How was this patch tested?
    
    Manually tested. CI should verify the changes too.
    
    Closes #36084 from HyukjinKwon/minor-docs.
    
    Authored-by: Hyukjin Kwon <gu...@apache.org>
    Signed-off-by: Hyukjin Kwon <gu...@apache.org>
    (cherry picked from commit fb3f380b3834ca24947a82cb8d87efeae6487664)
    Signed-off-by: Hyukjin Kwon <gu...@apache.org>
---
 sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 564eed1ecfd..7d16a2f5eee 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -1979,6 +1979,7 @@ class Dataset[T] private[sql](
   * {{{
   *   // Monitor the metrics using a listener.
   *   spark.streams.addListener(new StreamingQueryListener() {
+  *     override def onQueryStarted(event: QueryStartedEvent): Unit = {}
   *     override def onQueryProgress(event: QueryProgressEvent): Unit = {
   *       event.progress.observedMetrics.asScala.get("my_event").foreach { row =>
   *         // Trigger if the number of errors exceeds 5 percent
@@ -1990,8 +1991,7 @@ class Dataset[T] private[sql](
   *         }
   *       }
   *     }
-  *     def onQueryStarted(event: QueryStartedEvent): Unit = {}
-  *     def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
+  *     override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
   *   })
   *   // Observe row count (rc) and error row count (erc) in the streaming Dataset
   *   val observed_ds = ds.observe("my_event", count(lit(1)).as("rc"), count($"error").as("erc"))
@@ -2001,6 +2001,7 @@ class Dataset[T] private[sql](
   * @group typedrel
   * @since 3.0.0
   */
+  @varargs
   def observe(name: String, expr: Column, exprs: Column*): Dataset[T] = withTypedPlan {
     CollectMetrics(name, (expr +: exprs).map(_.named), logicalPlan)
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org