Posted to commits@flink.apache.org by fh...@apache.org on 2015/10/29 13:56:27 UTC

[07/10] flink git commit: [FLINK-2559] Clean up JavaDocs

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
index 33d5a3c..3722908 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
@@ -133,7 +133,8 @@ public class SingleOutputStreamOperator<T, O extends SingleOutputStreamOperator<
 
 	/**
 	 * Turns off chaining for this operator so thread co-location will not be
-	 * used as an optimization. </p> Chaining can be turned off for the whole
+	 * used as an optimization.
+	 * <p> Chaining can be turned off for the whole
 	 * job by {@link StreamExecutionEnvironment#disableOperatorChaining()}
 	 * however it is not advised for performance considerations.
 	 * 
@@ -279,7 +280,8 @@ public class SingleOutputStreamOperator<T, O extends SingleOutputStreamOperator<
 	 * maximum parallelism operator in that group. Task chaining is only
 	 * possible within one resource group. By calling this method, this
 	 * operators starts a new resource group and all subsequent operators will
-	 * be added to this group unless specified otherwise. </p> Please note that
+	 * be added to this group unless specified otherwise.
+	 * <p> Please note that
 	 * local executions have by default as many available task slots as the
 	 * environment parallelism, so in order to start a new resource group the
 	 * degree of parallelism for the operators must be decreased from the
@@ -296,7 +298,8 @@ public class SingleOutputStreamOperator<T, O extends SingleOutputStreamOperator<
 	 * Isolates the operator in its own resource group. This will cause the
 	 * operator to grab as many task slots as its degree of parallelism. If
 	 * there are no free resources available, the job will fail to start. It
-	 * also disables chaining for this operator </p>All subsequent operators are
+	 * also disables chaining for this operator.
+	 * <p>All subsequent operators are
 	 * assigned to the default resource group.
 	 * 
 	 * @return The operator with isolated resource group.
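
For context, the chaining and resource-group settings documented in the hunks above are applied directly on a single operator. A minimal, hypothetical sketch of such a pipeline (the data and functions are illustrative, not taken from the commit):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainingExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // env.disableOperatorChaining(); // would turn chaining off for the whole job

        DataStream<String> words = env.fromElements("to", "be", "or", "not", "to", "be");

        words
            .map(new MapFunction<String, String>() {
                @Override
                public String map(String value) {
                    return value.toUpperCase();
                }
            })
            // turn chaining off only for this operator, as described in the JavaDoc above
            .disableChaining()
            .print();

        env.execute("chaining example");
    }
}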

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
index 98506e0..210447d 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
@@ -183,7 +183,7 @@ public abstract class StreamExecutionEnvironment {
 	 * output buffers. By default the output buffers flush frequently to provide
 	 * low latency and to aid smooth developer experience. Setting the parameter
 	 * can result in three logical modes:
-	 * <p/>
+	 * <p>
 	 * <ul>
 	 * <li>
 	 * A positive integer triggers flushing periodically by that integer</li>
@@ -466,7 +466,7 @@ public abstract class StreamExecutionEnvironment {
 
 	/**
 	 * Adds a new Kryo default serializer to the Runtime.
-	 * <p/>
+	 * <p>
 	 * Note that the serializer instance must be serializable (as defined by
 	 * java.io.Serializable), because it may be distributed to the worker nodes
 	 * by java serialization.
@@ -495,7 +495,7 @@ public abstract class StreamExecutionEnvironment {
 
 	/**
 	 * Registers the given type with a Kryo Serializer.
-	 * <p/>
+	 * <p>
 	 * Note that the serializer instance must be serializable (as defined by
 	 * java.io.Serializable), because it may be distributed to the worker nodes
 	 * by java serialization.
@@ -851,7 +851,7 @@ public abstract class StreamExecutionEnvironment {
 	 * objects,
 	 * rather than Java Strings. StringValues can be used to tune implementations to be less object and garbage
 	 * collection heavy.
-	 * <p/>
+	 * <p>
 	 * The file will be read with the system's default character set.
 	 *
 	 * @param filePath
@@ -873,7 +873,7 @@ public abstract class StreamExecutionEnvironment {
 	 * objects, rather than Java Strings. StringValues can be used to tune implementations to be less object and
 	 * garbage
 	 * collection heavy.
-	 * <p/>
+	 * <p>
 	 * The {@link java.nio.charset.Charset} with the given name will be used to read the files.
 	 *
 	 * @param filePath
@@ -992,7 +992,7 @@ public abstract class StreamExecutionEnvironment {
 	 * Creates a new data stream that contains the strings received infinitely from a socket. Received strings are
 	 * decoded by the system's default character set. On the termination of the socket server connection retries can be
 	 * initiated.
-	 * <p/>
+	 * <p>
 	 * Let us note that the socket itself does not report on abort and as a consequence retries are only initiated when
 	 * the socket was gracefully terminated.
 	 *
@@ -1050,7 +1050,7 @@ public abstract class StreamExecutionEnvironment {
 
 	/**
 	 * Generic method to create an input data stream with {@link org.apache.flink.api.common.io.InputFormat}.
-	 * <p/>
+	 * <p>
 	 * Since all data streams need specific information about their types, this method needs to determine the type of
 	 * the data produced by the input format. It will attempt to determine the data type by reflection, unless the
 	 * input
@@ -1194,7 +1194,7 @@ public abstract class StreamExecutionEnvironment {
 	 * Triggers the program execution. The environment will execute all parts of
 	 * the program that have resulted in a "sink" operation. Sink operations are
 	 * for example printing results or forwarding them to a message queue.
-	 * <p/>
+	 * <p>
 	 * The program execution will be logged and displayed with a generated
 	 * default name.
 	 *
@@ -1209,7 +1209,7 @@ public abstract class StreamExecutionEnvironment {
 	 * Triggers the program execution. The environment will execute all parts of
 	 * the program that have resulted in a "sink" operation. Sink operations are
 	 * for example printing results or forwarding them to a message queue.
-	 * <p/>
+	 * <p>
 	 * The program execution will be logged and displayed with the provided name
 	 *
 	 * @param jobName
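
As a usage note for the environment settings touched above, a short, hypothetical sketch (host, port, and the timeout value are placeholders):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class EnvironmentSettingsExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Flush output buffers every 100 ms (a positive value means periodic flushing,
        // 0 means flushing after every record, -1 means flushing only when buffers are full).
        env.setBufferTimeout(100);

        // Hypothetical Kryo registration; MyType and MyTypeSerializer are placeholders:
        // env.registerTypeWithKryoSerializer(MyType.class, MyTypeSerializer.class);

        // Read lines from a socket; as noted in the JavaDoc above, retries are only
        // initiated when the socket was gracefully terminated.
        DataStream<String> lines = env.socketTextStream("localhost", 9999);

        lines.print();
        env.execute("environment settings example");
    }
}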

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/RichSourceFunction.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/RichSourceFunction.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/RichSourceFunction.java
index dd08b2a..24f1a41 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/RichSourceFunction.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/RichSourceFunction.java
@@ -36,7 +36,6 @@ import org.apache.flink.api.common.functions.AbstractRichFunction;
  *     <li>Use {@link org.apache.flink.api.common.functions.RuntimeContext#getIndexOfThisSubtask()} to
  *         determine which subtask the current instance of the function executes.</li>
  * </ul>
- * </p>
  *
  * @param <OUT> The type of the records produced by this source.
  */
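
To illustrate the runtime-context hint above, a small, hypothetical rich source that tags each record with its subtask index (class name and record contents are made up):

import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext;

public class SubtaskAwareSource extends RichSourceFunction<String> {

    private volatile boolean isRunning = true;

    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        // Each parallel instance can find out which subtask it is.
        int subtask = getRuntimeContext().getIndexOfThisSubtask();
        while (isRunning) {
            // Emit records under the checkpoint lock, as for any checkpointed source.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect("record from subtask " + subtask);
            }
            Thread.sleep(1000L);
        }
    }

    @Override
    public void cancel() {
        isRunning = false;
    }
}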

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java
index 886d6e7..0e201e4 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java
@@ -43,12 +43,12 @@ import java.io.Serializable;
  * This is the basic pattern one should follow when implementing a (checkpointed) source:
  * </p>
  *
- * {@code
+ * <pre>{@code
  *  public class ExampleSource<T> implements SourceFunction<T>, Checkpointed<Long> {
  *      private long count = 0L;
  *      private volatile boolean isRunning = true;
  *
- *      @Override
+ *      {@literal @}Override
  *      public void run(SourceContext<T> ctx) {
  *          while (isRunning && count < 1000) {
  *              synchronized (ctx.getCheckpointLock()) {
@@ -58,18 +58,18 @@ import java.io.Serializable;
  *          }
  *      }
  *
- *      @Override
+ *      {@literal @}Override
  *      public void cancel() {
  *          isRunning = false;
  *      }
  *
- *      @Override
+ *      {@literal @}Override
  *      public Long snapshotState(long checkpointId, long checkpointTimestamp) { return count; }
  *
- *      @Override
+ *      {@literal @}Override
  *      public void restoreState(Long state) { this.count = state; }
  * }
- * </pre>
+ * }</pre>
  *
  *
  * <p>
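
A hedged sketch of how a source following this pattern could be attached to a job (assuming an ExampleSource<Long> like the one in the snippet above; the checkpoint interval and the explicit type information are illustrative):

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ExampleSourceJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing so that snapshotState()/restoreState() are actually invoked.
        env.enableCheckpointing(5000);

        // Type information is passed explicitly because the source is generic.
        env.addSource(new ExampleSource<Long>(), BasicTypeInfo.LONG_TYPE_INFO)
           .print();

        env.execute("checkpointed source example");
    }
}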

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
index 4a87eb3..8bd0e48 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
@@ -61,7 +61,7 @@ import java.util.Map;
  * in the graph with the desired property. For example, if you have this graph:
  *
  * <pre>
- *     Map-1 -> HashPartition-2 -> Map-3
+ *     Map-1 -&gt; HashPartition-2 -&gt; Map-3
  * </pre>
  *
  * where the numbers represent transformation IDs. We first recurse all the way down. {@code Map-1}
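
A pipeline producing a graph of roughly this shape could look like the following sketch (keyBy is used here as the common way to introduce a hash-partitioned exchange between two maps; the functions are illustrative):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class PartitionedPipeline {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> input =
                env.fromElements(new Tuple2<>("a", 1), new Tuple2<>("b", 2));

        input
            .map(new MapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() { // Map-1
                @Override
                public Tuple2<String, Integer> map(Tuple2<String, Integer> value) {
                    return new Tuple2<>(value.f0, value.f1 + 1);
                }
            })
            .keyBy(0) // introduces the hash-partitioned exchange between the two maps
            .map(new MapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() { // Map-3
                @Override
                public Tuple2<String, Integer> map(Tuple2<String, Integer> value) {
                    return value;
                }
            })
            .print();

        env.execute("partitioned pipeline");
    }
}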

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/StreamTransformation.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/StreamTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/StreamTransformation.java
index 4e6dc42..fd48fa7 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/StreamTransformation.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/StreamTransformation.java
@@ -47,7 +47,7 @@ import java.util.Collection;
  * <p>
  * The following graph of {@code StreamTransformations}:
  *
- * <pre>
+ * <pre>{@code
  *   Source              Source        
  *      +                   +           
  *      |                   |           
@@ -72,11 +72,11 @@ import java.util.Collection;
  *                |                     
  *                v                     
  *              Sink 
- * </pre>
+ * }</pre>
  *
  * Would result in this graph of operations at runtime:
  *
- * <pre>
+ * <pre>{@code
  *  Source              Source
  *    +                   +
  *    |                   |
@@ -86,7 +86,7 @@ import java.util.Collection;
  *              |
  *              v
  *             Sink
- * </pre>
+ * }</pre>
  *
  * The information about partitioning, union, split/select end up being encoded in the edges
  * that connect the sources to the map operation.
@@ -233,8 +233,7 @@ public abstract class StreamTransformation<T> {
 	/**
 	 * Returns the buffer timeout of this {@code StreamTransformation}.
 	 *
-	 * <p>
-	 * {@see #setBufferTimeout}
+	 * @see #setBufferTimeout(long)
 	 */
 	public long getBufferTimeout() {
 		return bufferTimeout;
@@ -253,8 +252,7 @@ public abstract class StreamTransformation<T> {
 	/**
 	 * Returns the {@code ResourceStrategy} of this {@code StreamTransformation}.
 	 *
-	 * <p>
-	 * {@see #setResourceStrategy}
+	 * @see #setResourceStrategy(StreamGraph.ResourceStrategy)
 	 */
 	public StreamGraph.ResourceStrategy getResourceStrategy() {
 		return resourceStrategy;

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/time/Time.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/time/Time.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/time/Time.java
index 7f9356a..e0e9202 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/time/Time.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/time/Time.java
@@ -54,8 +54,7 @@ public final class Time extends AbstractTime {
 	 * Creates a new {@link Time} of the given duration and {@link TimeUnit}.
 	 *
 	 * <p>The {@code Time} refers to the time characteristic that is set on the dataflow via
-	 * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#
-	 * setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}.
+	 * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}.
 	 *
 	 * @param size The duration of time.
 	 * @param unit The unit of time of the duration, for example {@code TimeUnit.SECONDS}.
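
As a usage note, a Time value like the one documented here is typically handed to a windowing call; a brief, hypothetical sketch:

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;

public class TimeWindowExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // env.setStreamTimeCharacteristic(...); // controls how the Time below is interpreted

        DataStream<Integer> numbers = env.fromElements(1, 2, 3, 4, 5);

        numbers
            .timeWindowAll(Time.of(5, TimeUnit.SECONDS))
            .reduce(new ReduceFunction<Integer>() {
                @Override
                public Integer reduce(Integer a, Integer b) {
                    return a + b;
                }
            })
            .print();

        env.execute("time window example");
    }
}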

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
index 8c58e29..bae0128 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
@@ -63,7 +63,7 @@ import org.slf4j.LoggerFactory;
  * thus have multiple ends.
  *
  * The life cycle of the task is set up as follows: 
- * <pre>
+ * <pre>{@code
  *  -- registerInputOutput()
  *         |
  *         +----> Create basic utils (config, etc) and load the chain of operators
@@ -80,7 +80,7 @@ import org.slf4j.LoggerFactory;
  *        +----> dispose-operators()
  *        +----> common cleanup
  *        +----> task specific cleanup()
- * </pre>
+ * }</pre>
  *
  * <p> The {@code StreamTask} has a lock object called {@code lock}. All calls to methods on a
  * {@code StreamOperator} must be synchronized on this lock object to ensure that no methods

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTestHarness.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTestHarness.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTestHarness.java
index 7fb8ba3..f6e297c 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTestHarness.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/OneInputStreamTaskTestHarness.java
@@ -49,7 +49,7 @@ import java.io.IOException;
  *
  * <p>
  * When using this you need to add the following line to your test class to setup Powermock:
- * {@code @PrepareForTest({ResultPartitionWriter.class})}
+ * {@code {@literal @}PrepareForTest({ResultPartitionWriter.class})}
  */
 public class OneInputStreamTaskTestHarness<IN, OUT> extends StreamTaskTestHarness<OUT> {
 
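
A hypothetical skeleton of a test class built on this harness, showing where the @PrepareForTest line goes (class and test names are made up):

import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

@RunWith(PowerMockRunner.class)
@PrepareForTest({ResultPartitionWriter.class})
public class MyStreamTaskTest {

    @Test
    public void someStreamTaskTest() throws Exception {
        // build a OneInputStreamTaskTestHarness, feed elements, assert on the output
    }
}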

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTestHarness.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTestHarness.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTestHarness.java
index 6c48668..655608b 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTestHarness.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTestHarness.java
@@ -60,7 +60,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
  *
  * <p>
  * When using this you need to add the following line to your test class to setup Powermock:
- * {@code @PrepareForTest({ResultPartitionWriter.class})}
+ * {@code {@literal @}PrepareForTest({ResultPartitionWriter.class})}
  */
 public class StreamTaskTestHarness<OUT> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTestHarness.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTestHarness.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTestHarness.java
index 2b20101..13a9782 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTestHarness.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTestHarness.java
@@ -57,7 +57,7 @@ import java.util.List;
  *
  * <p>
  * When using this you need to add the following line to your test class to setup Powermock:
- * {@code @PrepareForTest({ResultPartitionWriter.class})}
+ * {@code {@literal @}PrepareForTest({ResultPartitionWriter.class})}
  */
 public class TwoInputStreamTaskTestHarness<IN1, IN2, OUT> extends StreamTaskTestHarness<OUT> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-test-utils/src/main/java/org/apache/flink/test/util/MultipleProgramsTestBase.java
----------------------------------------------------------------------
diff --git a/flink-test-utils/src/main/java/org/apache/flink/test/util/MultipleProgramsTestBase.java b/flink-test-utils/src/main/java/org/apache/flink/test/util/MultipleProgramsTestBase.java
index 2ea61d7..27710d7 100644
--- a/flink-test-utils/src/main/java/org/apache/flink/test/util/MultipleProgramsTestBase.java
+++ b/flink-test-utils/src/main/java/org/apache/flink/test/util/MultipleProgramsTestBase.java
@@ -38,14 +38,14 @@ import java.util.Collection;
  *
  * <pre>{@code
  *
- *   @Test
+ *   {@literal @}Test
  *   public void someTest() {
  *       ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
  *       // test code
  *       env.execute();
  *   }
  *
- *   @Test
+ *   {@literal @}Test
  *   public void anotherTest() {
  *       ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
  *       // test code

http://git-wip-us.apache.org/repos/asf/flink/blob/e1f30b04/flink-tests/src/test/java/org/apache/flink/test/recordJobs/wordcount/WordCount.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/recordJobs/wordcount/WordCount.java b/flink-tests/src/test/java/org/apache/flink/test/recordJobs/wordcount/WordCount.java
index 96eb1fc..2fcc523 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/recordJobs/wordcount/WordCount.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/recordJobs/wordcount/WordCount.java
@@ -45,6 +45,12 @@ import java.util.concurrent.TimeUnit;
 /**
  * Implements a word count which takes the input file and counts the number of
  * the occurrences of each word in the file.
+ *
+ * <br><br>
+ *
+ * <b>Note</b>: This example uses the out-dated Record API.
+ * It is recommended to use the new Java API.
+ *
  */
 @SuppressWarnings("deprecation")
 public class WordCount implements Program, ProgramDescription {