Posted to commits@flink.apache.org by se...@apache.org on 2020/09/11 16:33:39 UTC

[flink] 09/10: [hotfix][javadocs] Improve JavaDocs for StreamExecutionEnvironment.addSource(...)

This is an automated email from the ASF dual-hosted git repository.

sewen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 23b551f6f5fd961b8ccb31144d2a0f2801e2feb6
Author: Stephan Ewen <se...@apache.org>
AuthorDate: Wed Sep 2 17:24:54 2020 +0200

    [hotfix][javadocs] Improve JavaDocs for StreamExecutionEnvironment.addSource(...)
---
 .../environment/StreamExecutionEnvironment.java    | 24 ++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
index e559147..f5ef633 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
@@ -1605,7 +1605,17 @@ public class StreamExecutionEnvironment {
 	}
 
 	/**
-	 * Add a data {@link Source} to the environment to get a {@link DataStream}.
+	 * Adds a data {@link Source} to the environment to get a {@link DataStream}.
+	 *
+	 * <p>The result will be either a bounded data stream (that can be processed in a batch way) or
+	 * an unbounded data stream (that must be processed in a streaming way), based on the
+	 * boundedness property of the source, as defined by {@link Source#getBoundedness()}.
+	 *
+	 * <p>The result type (that is used to create serializers for the produced data events)
+	 * will be automatically extracted. This is useful for sources that describe the produced types
+	 * already in their configuration, to avoid having to declare the type multiple times.
+	 * For example, the file sources and Kafka sources already define the produced type via
+	 * their parsers/serializers/formats, and can forward that information.
 	 *
 	 * @param source
 	 * 		the user defined source
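
For context, a minimal usage sketch of the auto-extracting overload documented in the hunk above. This sketch is not part of the commit; it assumes Flink's built-in NumberSequenceSource, which is bounded and declares that it produces Longs, so the result type is extracted without an explicit TypeInformation:

    import org.apache.flink.api.common.eventtime.WatermarkStrategy;
    import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // The source is bounded (Source#getBoundedness() returns BOUNDED) and describes
    // its produced type, so no explicit TypeInformation is needed here.
    DataStream<Long> numbers = env.fromSource(
            new NumberSequenceSource(1L, 1_000L),
            WatermarkStrategy.noWatermarks(),
            "number-sequence");
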
@@ -1624,7 +1634,17 @@ public class StreamExecutionEnvironment {
 	}
 
 	/**
-	 * Add a data {@link Source} to the environment to get a {@link DataStream}.
+	 * Adds a data {@link Source} to the environment to get a {@link DataStream}.
+	 *
+	 * <p>The result will be either a bounded data stream (that can be processed in a batch way) or
+	 * an unbounded data stream (that must be processed in a streaming way), based on the
+	 * boundedness property of the source, as defined by {@link Source#getBoundedness()}.
+	 *
+	 * <p>This method takes explicit type information for the produced data stream, so that callers
+	 * can define directly what type/serializer will be used for the produced stream.
+	 * For sources that describe their produced type, the method
+	 * {@link #fromSource(Source, WatermarkStrategy, String)} can be used to avoid specifying the
+	 * produced type redundantly.
 	 *
 	 * @param source
 	 * 		the user defined source
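
And a sketch of the explicit-type overload documented in the hunk above (also not part of the commit; MyEvent and myJsonSource are hypothetical placeholders for an event class and a Source<MyEvent, ?, ?> implementation that does not describe its produced type):

    import org.apache.flink.api.common.eventtime.WatermarkStrategy;
    import org.apache.flink.api.common.typeinfo.TypeInformation;
    import org.apache.flink.streaming.api.datastream.DataStream;

    // Because the (hypothetical) source does not expose its produced type, the caller
    // passes the TypeInformation explicitly, which also determines the serializer used
    // for the produced stream.
    DataStream<MyEvent> events = env.fromSource(
            myJsonSource,
            WatermarkStrategy.<MyEvent>forMonotonousTimestamps(),
            "my-events",
            TypeInformation.of(MyEvent.class));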