Posted to commits@flink.apache.org by fh...@apache.org on 2015/10/29 13:22:40 UTC

[07/13] flink git commit: [FLINK-2559] Clean up JavaDocs

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/operators/join/JoinOperatorSetsBase.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/join/JoinOperatorSetsBase.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/join/JoinOperatorSetsBase.java
index 6f00cdb..e14e06d 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/operators/join/JoinOperatorSetsBase.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/join/JoinOperatorSetsBase.java
@@ -33,7 +33,7 @@ import org.apache.flink.api.java.tuple.Tuple;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 
 /**
- * Intermediate step of an Outer Join transformation. <br/>
+ * Intermediate step of an Outer Join transformation. <br>
  * To continue the Join transformation, select the join key of the first input {@link DataSet} by calling
  * {@link JoinOperatorSetsBase#where(int...)} or
  * {@link JoinOperatorSetsBase#where(KeySelector)}.
@@ -69,9 +69,9 @@ public class JoinOperatorSetsBase<I1, I2> {
 	}
 
 	/**
-	 * Continues a Join transformation. <br/>
-	 * Defines the {@link Tuple} fields of the first join {@link DataSet} that should be used as join keys.<br/>
-	 * <b>Note: Fields can only be selected as join keys on Tuple DataSets.</b><br/>
+	 * Continues a Join transformation. <br>
+	 * Defines the {@link Tuple} fields of the first join {@link DataSet} that should be used as join keys.<br>
+	 * <b>Note: Fields can only be selected as join keys on Tuple DataSets.</b><br>
 	 *
 	 * @param fields The indexes of the other Tuple fields of the first join DataSets that should be used as keys.
 	 * @return An incomplete Join transformation.
@@ -87,7 +87,7 @@ public class JoinOperatorSetsBase<I1, I2> {
 	}
 
 	/**
-	 * Continues a Join transformation. <br/>
+	 * Continues a Join transformation. <br>
 	 * Defines the fields of the first join {@link DataSet} that should be used as grouping keys. Fields
 	 * are the names of member fields of the underlying type of the data set.
 	 *
@@ -105,9 +105,9 @@ public class JoinOperatorSetsBase<I1, I2> {
 	}
 
 	/**
-	 * Continues a Join transformation and defines a {@link KeySelector} function for the first join {@link DataSet}.</br>
+	 * Continues a Join transformation and defines a {@link KeySelector} function for the first join {@link DataSet}.<br>
 	 * The KeySelector function is called for each element of the first DataSet and extracts a single
-	 * key value on which the DataSet is joined. </br>
+	 * key value on which the DataSet is joined. <br>
 	 *
 	 * @param keySelector The KeySelector function which extracts the key values from the DataSet on which it is joined.
 	 * @return An incomplete Join transformation.
@@ -125,7 +125,7 @@ public class JoinOperatorSetsBase<I1, I2> {
 
 
 	/**
-	 * Intermediate step of a Join transformation. <br/>
+	 * Intermediate step of a Join transformation. <br>
 	 * To continue the Join transformation, select the join key of the second input {@link DataSet} by calling
 	 * {@link org.apache.flink.api.java.operators.join.JoinOperatorSetsBase.JoinOperatorSetsPredicateBase#equalTo(int...)} or
 	 * {@link org.apache.flink.api.java.operators.join.JoinOperatorSetsBase.JoinOperatorSetsPredicateBase#equalTo(KeySelector)}.
@@ -149,8 +149,8 @@ public class JoinOperatorSetsBase<I1, I2> {
 
 		/**
 		 * Continues a Join transformation and defines the {@link Tuple} fields of the second join
-		 * {@link DataSet} that should be used as join keys.<br/>
-		 * <b>Note: Fields can only be selected as join keys on Tuple DataSets.</b><br/>
+		 * {@link DataSet} that should be used as join keys.<br>
+		 * <b>Note: Fields can only be selected as join keys on Tuple DataSets.</b><br>
 		 *
 		 * The resulting {@link JoinFunctionAssigner} needs to be finished by providing a
 		 * {@link JoinFunction} by calling {@link JoinFunctionAssigner#with(JoinFunction)}
@@ -164,7 +164,7 @@ public class JoinOperatorSetsBase<I1, I2> {
 
 		/**
 		 * Continues a Join transformation and defines the fields of the second join
-		 * {@link DataSet} that should be used as join keys.<br/>
+		 * {@link DataSet} that should be used as join keys.<br>
 		 *
 		 * The resulting {@link JoinFunctionAssigner} needs to be finished by providing a
 		 * {@link JoinFunction} by calling {@link JoinFunctionAssigner#with(JoinFunction)}
@@ -177,9 +177,9 @@ public class JoinOperatorSetsBase<I1, I2> {
 		}
 
 		/**
-		 * Continues a Join transformation and defines a {@link KeySelector} function for the second join {@link DataSet}.</br>
+		 * Continues a Join transformation and defines a {@link KeySelector} function for the second join {@link DataSet}.<br>
 		 * The KeySelector function is called for each element of the second DataSet and extracts a single
-		 * key value on which the DataSet is joined. </br>
+		 * key value on which the DataSet is joined. <br>
 		 *
 		 * The resulting {@link JoinFunctionAssigner} needs to be finished by providing a
 		 * {@link JoinFunction} by calling {@link JoinFunctionAssigner#with(JoinFunction)}
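
For reference, a minimal sketch of the join flow this class documents, assuming the
DataSet outer-join entry point of this release (the data values are illustrative, and
defaults are substituted for missing sides because Flink tuples must not contain nulls):

    import org.apache.flink.api.common.functions.JoinFunction;
    import org.apache.flink.api.java.DataSet;
    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.api.java.tuple.Tuple3;

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple2<Integer, String>> left = env.fromElements(
            new Tuple2<Integer, String>(1, "a"), new Tuple2<Integer, String>(2, "b"));
    DataSet<Tuple2<Integer, Double>> right = env.fromElements(
            new Tuple2<Integer, Double>(1, 1.0), new Tuple2<Integer, Double>(3, 3.0));

    // where() keys the first input, equalTo() the second, with() finishes the join.
    // In a full outer join, either input of the JoinFunction may be null.
    DataSet<Tuple3<Integer, String, Double>> joined = left
            .fullOuterJoin(right)
            .where(0)
            .equalTo(0)
            .with(new JoinFunction<Tuple2<Integer, String>, Tuple2<Integer, Double>,
                    Tuple3<Integer, String, Double>>() {
                @Override
                public Tuple3<Integer, String, Double> join(
                        Tuple2<Integer, String> first, Tuple2<Integer, Double> second) {
                    Integer key = first != null ? first.f0 : second.f0;
                    return new Tuple3<Integer, String, Double>(key,
                            first != null ? first.f1 : "",
                            second != null ? second.f1 : Double.NaN);
                }
            });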

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3WrappingCollector.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3WrappingCollector.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3WrappingCollector.java
index 66592f5..29d68f1 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3WrappingCollector.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3WrappingCollector.java
@@ -22,7 +22,7 @@ import org.apache.flink.api.java.tuple.Tuple3;
 import org.apache.flink.util.Collector;
 
 /**
- * Needed to wrap tuples to Tuple3<groupKey, sortKey, value> for combine method of group reduce with key selector sorting
+ * Needed to wrap tuples to {@code Tuple3<groupKey, sortKey, value>} for combine method of group reduce with key selector sorting
  */
 public class Tuple3WrappingCollector<IN, K1, K2> implements Collector<IN>, java.io.Serializable {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/TupleWrappingCollector.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/TupleWrappingCollector.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/TupleWrappingCollector.java
index 43f667f..8f0b4fa 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/TupleWrappingCollector.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/TupleWrappingCollector.java
@@ -22,7 +22,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.util.Collector;
 
 /**
- * Needed to wrap tuples to Tuple2<key, value> pairs for combine method of group reduce with key selector function
+ * Needed to wrap tuples to {@code Tuple2<key, value>} pairs for combine method of group reduce with key selector function
  */
 public class TupleWrappingCollector<IN, K> implements Collector<IN>, java.io.Serializable {
 	

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/record/functions/FunctionAnnotation.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/record/functions/FunctionAnnotation.java b/flink-java/src/main/java/org/apache/flink/api/java/record/functions/FunctionAnnotation.java
index d7a17b8..71d2a62 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/record/functions/FunctionAnnotation.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/record/functions/FunctionAnnotation.java
@@ -44,8 +44,8 @@ import org.apache.flink.api.common.operators.util.UserCodeWrapper;
  * annotation for a map-type function that realizes a simple absolute function,
  * use it the following way:
  * 
- * <pre><blockquote>
- * \@ConstantFieldsExcept(fields={2})
+ * <pre>{@code
+ * {@literal @}ConstantFieldsExcept(fields={2})
  * public class MyMapper extends MapFunction
  * {
  *     public void map(Record record, Collector out)
@@ -56,7 +56,7 @@ import org.apache.flink.api.common.operators.util.UserCodeWrapper;
 		out.collect(record);
  *     }
  * }
- * </blockquote></pre>
+ * }</pre>
  * 
  * Be aware that some annotations should only be used for functions with as single input 
  * ({@link MapFunction}, {@link ReduceFunction}) and some only for stubs with two inputs 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/record/io/ExternalProcessInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/record/io/ExternalProcessInputFormat.java b/flink-java/src/main/java/org/apache/flink/api/java/record/io/ExternalProcessInputFormat.java
index b1f7ffc..cbf16b5 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/record/io/ExternalProcessInputFormat.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/record/io/ExternalProcessInputFormat.java
@@ -32,7 +32,7 @@ import org.apache.flink.core.io.GenericInputSplit;
  * The input format checks the exit code of the process to validate whether the process terminated correctly. A list of allowed exit codes can be provided.
  * The input format requires ({@link ExternalProcessInputSplit} objects that hold the command to execute.
  * 
- * <b>Attention! </b><br/>
+ * <b>Attention! </b><br>
  * You must take care to read from (and process) both output streams of the process, standard out (stdout) and standard error (stderr). 
  * Otherwise, the input format might get deadlocked! 
  * 
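
The deadlock warning above follows from standard java.lang.Process behavior: if either
output pipe of the child fills up, the child blocks on its next write. A schematic way
to honor the contract with plain JDK calls (command name illustrative, exception
handling elided):

    import java.io.IOException;
    import java.io.InputStream;

    final Process process = new ProcessBuilder("some-command").start();

    // Drain stderr on a helper thread so the child can never block on a full pipe.
    Thread stderrDrainer = new Thread(new Runnable() {
        @Override
        public void run() {
            byte[] buf = new byte[4096];
            try {
                InputStream err = process.getErrorStream();
                while (err.read(buf) != -1) {
                    // discard (or log) the stderr output
                }
            } catch (IOException ignored) {
            }
        }
    });
    stderrDrainer.setDaemon(true);
    stderrDrainer.start();

    // Consume stdout as the actual data stream on the calling thread.
    InputStream stdout = process.getInputStream();
    // ... read and parse records from 'stdout' ...
    int exitCode = process.waitFor(); // finally, check against the allowed exit codes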

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/typeutils/AvroTypeInfo.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/AvroTypeInfo.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/AvroTypeInfo.java
index 59c7eb0..5f74513 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/AvroTypeInfo.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/AvroTypeInfo.java
@@ -29,8 +29,8 @@ import java.util.List;
 /**
  * Special type information to generate a special AvroTypeInfo for Avro POJOs (implementing SpecificRecordBase, the typed Avro POJOs)
  *
- * Proceeding: It uses a regular pojo type analysis and replaces all GenericType<CharSequence>
- *     with a GenericType<avro.Utf8>.
+ * Proceeding: It uses a regular pojo type analysis and replaces all {@code GenericType<CharSequence>}
+ *     with a {@code GenericType<avro.Utf8>}.
  * All other types used by Avro are standard Java types.
  * Only strings are represented as CharSequence fields and represented as Utf8 classes at runtime.
  * CharSequence is not comparable. To make them nicely usable with field expressions, we replace them here

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/Serializers.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/Serializers.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/Serializers.java
index 9e0da25..76f8eb4 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/Serializers.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/Serializers.java
@@ -107,8 +107,8 @@ public class Serializers {
 	}
 
 	/**
-	 * Register these serializers for using Avro's {@see GenericData.Record} and classes
-	 * implementing {@see org.apache.avro.specific.SpecificRecordBase}
+	 * Register these serializers for using Avro's {@link GenericData.Record} and classes
+	 * implementing {@link org.apache.avro.specific.SpecificRecordBase}
 	 */
 	public static void registerGenericAvro(ExecutionConfig reg) {
 		// Avro POJOs contain java.util.List which have GenericData.Array as their runtime type
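
A minimal usage sketch for the method above (only the standard ExecutionEnvironment
entry point is assumed; the job itself is elided):

    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.typeutils.runtime.kryo.Serializers;

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // Register Kryo serializers for GenericData.Record and for classes implementing
    // SpecificRecordBase before Avro types flow through Kryo-serialized operators.
    Serializers.registerGenericAvro(env.getConfig());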

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java/src/main/java/org/apache/flink/api/java/utils/DataSetUtils.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/utils/DataSetUtils.java b/flink-java/src/main/java/org/apache/flink/api/java/utils/DataSetUtils.java
index 8f9fa3f..b91dc82 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/utils/DataSetUtils.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/utils/DataSetUtils.java
@@ -206,7 +206,7 @@ public final class DataSetUtils {
 	 * <p>
 	 * <strong>NOTE:</strong> Sample with fixed size is not as efficient as sample with fraction, use sample with
 	 * fraction unless you need exact precision.
-	 * <p/>
+	 * </p>
 	 *
 	 * @param withReplacement Whether element can be selected more than once.
 	 * @param numSample       The expected sample size.
@@ -225,7 +225,7 @@ public final class DataSetUtils {
 	 * <p>
 	 * <strong>NOTE:</strong> Sample with fixed size is not as efficient as sample with fraction, use sample with
 	 * fraction unless you need exact precision.
-	 * <p/>
+	 * </p>
 	 *
 	 * @param withReplacement Whether element can be selected more than once.
 	 * @param numSample       The expected sample size.
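
A short sketch contrasting the two sampling flavors the notes above refer to. The
sample/sampleWithSize method names are assumed from this class; the input DataSet
'data' and the parameter values are illustrative:

    import org.apache.flink.api.java.DataSet;
    import org.apache.flink.api.java.utils.DataSetUtils;

    // Preferred: sample by fraction -- a cheap single pass over the data.
    DataSet<Long> approx = DataSetUtils.sample(data, false, 0.1);

    // Fixed-size sample -- more expensive; use only when the exact count matters.
    DataSet<Long> exact = DataSetUtils.sampleWithSize(data, false, 100);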

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-java8/src/main/java/org/apache/flink/examples/java8/relational/TPCHQuery10.java
----------------------------------------------------------------------
diff --git a/flink-java8/src/main/java/org/apache/flink/examples/java8/relational/TPCHQuery10.java b/flink-java8/src/main/java/org/apache/flink/examples/java8/relational/TPCHQuery10.java
index 4dae630..1ad4f41 100644
--- a/flink-java8/src/main/java/org/apache/flink/examples/java8/relational/TPCHQuery10.java
+++ b/flink-java8/src/main/java/org/apache/flink/examples/java8/relational/TPCHQuery10.java
@@ -35,7 +35,7 @@ import org.apache.flink.api.java.tuple.Tuple6;
  * This program implements the following SQL equivalent:
  * 
  * <p>
- * <code><pre>
+ * <pre>{@code
  * SELECT 
  *        c_custkey,
  *        c_name, 
@@ -60,7 +60,7 @@ import org.apache.flink.api.java.tuple.Tuple6;
  *        c_acctbal, 
  *        n_name, 
  *        c_address
- * </pre></code>
+ * }</pre>
  *        
  * <p>
  * Compared to the original TPC-H query this version does not print 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/Graph.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/Graph.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/Graph.java
index 68bfb5b..4f603f7 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/Graph.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/Graph.java
@@ -352,10 +352,10 @@ public class Graph<K, VV, EV> {
 	* @return An instance of {@link org.apache.flink.graph.GraphCsvReader}, 
 	* on which calling methods to specify types of the Vertex ID, Vertex value and Edge value returns a Graph.
 	* 
-	* @see {@link org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)},
-	* {@link org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)},
-	* {@link org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)} and
-	* {@link org.apache.flink.graph.GraphCsvReader#keyType(Class)}.
+	* @see org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#keyType(Class)
 	*/
 	public static GraphCsvReader fromCsvReader(String verticesPath, String edgesPath, ExecutionEnvironment context) {
 		return new GraphCsvReader(verticesPath, edgesPath, context);
@@ -369,10 +369,10 @@ public class Graph<K, VV, EV> {
 	* @return An instance of {@link org.apache.flink.graph.GraphCsvReader},
 	* on which calling methods to specify types of the Vertex ID, Vertex value and Edge value returns a Graph.
 	* 
-	* @see {@link org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)},
-	* {@link org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)},
-	* {@link org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)} and
-	* {@link org.apache.flink.graph.GraphCsvReader#keyType(Class)}.
+	* @see org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)
+	* @see org.apache.flink.graph.GraphCsvReader#keyType(Class)
 	*/
 	public static GraphCsvReader fromCsvReader(String edgesPath, ExecutionEnvironment context) {
 		return new GraphCsvReader(edgesPath, context);
@@ -389,10 +389,10 @@ public class Graph<K, VV, EV> {
 	 * @return An instance of {@link org.apache.flink.graph.GraphCsvReader},
 	 * on which calling methods to specify types of the Vertex ID, Vertex Value and Edge value returns a Graph.
 	 * 
-	 * @see {@link org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)},
-	 * {@link org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)},
-	 * {@link org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)} and
-	 * {@link org.apache.flink.graph.GraphCsvReader#keyType(Class)}.
+	 * @see org.apache.flink.graph.GraphCsvReader#types(Class, Class, Class)
+	 * @see org.apache.flink.graph.GraphCsvReader#vertexTypes(Class, Class)
+	 * @see org.apache.flink.graph.GraphCsvReader#edgeTypes(Class, Class)
+	 * @see org.apache.flink.graph.GraphCsvReader#keyType(Class)
 	 */
 	public static <K, VV> GraphCsvReader fromCsvReader(String edgesPath,
 			final MapFunction<K, VV> vertexValueInitializer, ExecutionEnvironment context) {
@@ -821,7 +821,7 @@ public class Graph<K, VV, EV> {
 	/**
 	 * Return the out-degree of all vertices in the graph
 	 * 
-	 * @return A DataSet of Tuple2<vertexId, outDegree>
+	 * @return A DataSet of {@code Tuple2<vertexId, outDegree>}
 	 */
 	public DataSet<Tuple2<K, Long>> outDegrees() {
 
@@ -851,7 +851,7 @@ public class Graph<K, VV, EV> {
 	/**
 	 * Return the in-degree of all vertices in the graph
 	 * 
-	 * @return A DataSet of Tuple2<vertexId, inDegree>
+	 * @return A DataSet of {@code Tuple2<vertexId, inDegree>}
 	 */
 	public DataSet<Tuple2<K, Long>> inDegrees() {
 
@@ -861,7 +861,7 @@ public class Graph<K, VV, EV> {
 	/**
 	 * Return the degree of all vertices in the graph
 	 * 
-	 * @return A DataSet of Tuple2<vertexId, degree>
+	 * @return A DataSet of {@code Tuple2<vertexId, degree>}
 	 */
 	public DataSet<Tuple2<K, Long>> getDegrees() {
 		return outDegrees().union(inDegrees()).groupBy(0).sum(1);
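
A compact sketch tying together the fromCsvReader/types flow and the degree methods
documented above (paths and type choices are illustrative):

    import org.apache.flink.api.java.DataSet;
    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.graph.Graph;

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // types(...) fixes the vertex ID, vertex value and edge value types and
    // turns the GraphCsvReader into a Graph.
    Graph<Long, Long, Double> graph = Graph
            .fromCsvReader("/path/to/vertices.csv", "/path/to/edges.csv", env)
            .types(Long.class, Long.class, Double.class);

    // Each result is a DataSet of Tuple2<vertexId, degree>, as documented above.
    DataSet<Tuple2<Long, Long>> out = graph.outDegrees();
    DataSet<Tuple2<Long, Long>> in  = graph.inDegrees();
    DataSet<Tuple2<Long, Long>> all = graph.getDegrees();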

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/EuclideanGraphWeighing.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/EuclideanGraphWeighing.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/EuclideanGraphWeighing.java
index 2a10bd1..712be3e 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/EuclideanGraphWeighing.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/EuclideanGraphWeighing.java
@@ -35,7 +35,7 @@ import java.io.Serializable;
 
 /**
  * This example shows how to use Gelly's {@link Graph#getTriplets()} and
- * {@link Graph#joinWithEdges(DataSet, MapFunction)} methods.
+ * {@link Graph#joinWithEdges(DataSet, EdgeJoinFunction)} methods.
  * 
  * Given a directed, unweighted graph, with vertex values representing points in a plan,
  * return a weighted graph where the edge weights are equal to the Euclidean distance between the
@@ -51,7 +51,6 @@ import java.io.Serializable;
  * 	Edges themselves are separated by newlines.
  * 	For example: <code>1,2\n1,3\n</code> defines two edges 1-2 and 1-3.
  * </ul>
- * </p>
  *
  * Usage <code>EuclideanGraphWeighing &lt;vertex path&gt; &lt;edge path&gt; &lt;result path&gt;</code><br>
  * If no parameters are provided, the program is run with default data from

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/GraphMetrics.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/GraphMetrics.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/GraphMetrics.java
index b808e76..117f7d1 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/GraphMetrics.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/GraphMetrics.java
@@ -42,7 +42,7 @@ import org.apache.flink.types.NullValue;
  *
  * The input file is expected to contain one edge per line,
  * with long IDs and no values, in the following format:
- * "<sourceVertexID>\t<targetVertexID>".
+ * "&lt;sourceVertexID&gt;\t&lt;targetVertexID&gt;".
  * If no arguments are provided, the example runs with a random graph of 100 vertices.
  *
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/IncrementalSSSP.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/IncrementalSSSP.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/IncrementalSSSP.java
index c03937d..947f343 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/IncrementalSSSP.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/IncrementalSSSP.java
@@ -52,7 +52,7 @@ import org.apache.flink.graph.spargel.VertexUpdateFunction;
  * The edge is simply removed from the graph.
  * - If the removed edge is an SP-edge, then all nodes, whose shortest path contains the removed edge,
  * potentially require re-computation.
- * When the edge <u, v> is removed, v checks if it has another out-going SP-edge.
+ * When the edge {@code <u, v>} is removed, v checks if it has another out-going SP-edge.
  * If yes, no further computation is required.
  * If v has no other out-going SP-edge, it invalidates its current value, by setting it to INF.
  * Then, it informs all its SP-in-neighbors by sending them an INVALIDATE message.

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/MusicProfiles.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/MusicProfiles.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/MusicProfiles.java
index 297dce2..24244c8 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/MusicProfiles.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/example/MusicProfiles.java
@@ -45,10 +45,10 @@ import org.apache.flink.util.Collector;
 
 /**
  * This example demonstrates how to mix the DataSet Flink API with the Gelly API.
- * The input is a set <userId - songId - playCount> triplets and
+ * The input is a set &lt;userId - songId - playCount&gt; triplets and
  * a set of bad records, i.e. song ids that should not be trusted.
  * Initially, we use the DataSet API to filter out the bad records.
- * Then, we use Gelly to create a user -> song weighted bipartite graph and compute
+ * Then, we use Gelly to create a user -&gt; song weighted bipartite graph and compute
  * the top song (most listened) per user.
  * Then, we use the DataSet API again, to create a user-user similarity graph,
  * based on common songs, where users that are listeners of the same song
@@ -58,11 +58,11 @@ import org.apache.flink.util.Collector;
  * the similarity graph.
  *
  * The triplets input is expected to be given as one triplet per line,
- * in the following format: "<userID>\t<songID>\t<playcount>".
+ * in the following format: "&lt;userID&gt;\t&lt;songID&gt;\t&lt;playcount&gt;".
  *
  * The mismatches input file is expected to contain one mismatch record per line,
  * in the following format:
- * "ERROR: <songID trackID> song_title"
+ * "ERROR: &lt;songID trackID&gt; song_title"
  *
  * If no arguments are provided, the example runs with default data from {@link MusicProfilesData}.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/ApplyFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/ApplyFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/ApplyFunction.java
index 5a8e97a..e9add7c 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/ApplyFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/ApplyFunction.java
@@ -45,7 +45,7 @@ public abstract class ApplyFunction<K, VV, M> implements Serializable {
 
 	/**
 	 * Retrieves the number of vertices in the graph.
-	 * @return the number of vertices if the {@link IterationConfigurationion#setOptNumVertices(boolean)}
+	 * @return the number of vertices if the {@link org.apache.flink.graph.IterationConfiguration#setOptNumVertices(boolean)}
 	 * option has been set; -1 otherwise.
 	 */
 	public long getNumberOfVertices() {
@@ -80,15 +80,11 @@ public abstract class ApplyFunction<K, VV, M> implements Serializable {
 
 	/**
 	 * This method is executed once per superstep before the vertex update function is invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the pre-superstep phase cause the superstep to fail.
 	 */
 	public void preSuperstep() {}
 
 	/**
 	 * This method is executed once per superstep after the vertex update function has been invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the post-superstep phase cause the superstep to fail.
 	 */
 	public void postSuperstep() {}
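
A sketch of how the superstep hooks above sit in a concrete apply function. The
abstract apply(M, VV) method and setResult(VV) are assumed from this class's API;
the min-value logic is illustrative:

    import org.apache.flink.graph.gsa.ApplyFunction;

    // Keep the smaller of the gathered value and the current vertex value,
    // e.g. in a connected-components style computation.
    public class MinValueApply extends ApplyFunction<Long, Long, Long> {

        @Override
        public void preSuperstep() {
            // runs once per superstep, before apply() is called for any vertex
        }

        @Override
        public void apply(Long summedValue, Long currentValue) {
            if (summedValue < currentValue) {
                setResult(summedValue); // emit only when the value actually changes
            }
        }

        @Override
        public void postSuperstep() {
            // runs once per superstep, after apply() has been called for each vertex
        }
    }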
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/GatherFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/GatherFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/GatherFunction.java
index 563b20e..d914f2a 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/GatherFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/GatherFunction.java
@@ -43,7 +43,7 @@ public abstract class GatherFunction<VV, EV, M> implements Serializable {
 
 	/**
 	 * Retrieves the number of vertices in the graph.
-	 * @return the number of vertices if the {@link IterationConfigurationion#setOptNumVertices(boolean)}
+	 * @return the number of vertices if the {@link org.apache.flink.graph.IterationConfiguration#setOptNumVertices(boolean)}
 	 * option has been set; -1 otherwise.
 	 */
 	public long getNumberOfVertices() {
@@ -69,15 +69,11 @@ public abstract class GatherFunction<VV, EV, M> implements Serializable {
 
 	/**
 	 * This method is executed once per superstep before the vertex update function is invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the pre-superstep phase cause the superstep to fail.
 	 */
 	public void preSuperstep() {}
 
 	/**
 	 * This method is executed once per superstep after the vertex update function has been invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the post-superstep phase cause the superstep to fail.
 	 */
 	public void postSuperstep() {}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/Neighbor.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/Neighbor.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/Neighbor.java
index 7fa1ed2..4c970fb 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/Neighbor.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/Neighbor.java
@@ -21,8 +21,8 @@ package org.apache.flink.graph.gsa;
 import org.apache.flink.api.java.tuple.Tuple2;
 
 /**
- * This class represents a <sourceVertex, edge> pair
- * This is a wrapper around Tuple2<VV, EV> for convenience in the GatherFunction
+ * This class represents a {@code <sourceVertex, edge>} pair
+ * This is a wrapper around {@code Tuple2<VV, EV>} for convenience in the GatherFunction
  * @param <VV> the vertex value type
  * @param <EV> the edge value type
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/SumFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/SumFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/SumFunction.java
index f27e275..68e8d27 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/SumFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/gsa/SumFunction.java
@@ -43,7 +43,7 @@ public abstract class SumFunction<VV, EV, M> implements Serializable {
 
 	/**
 	 * Retrieves the number of vertices in the graph.
-	 * @return the number of vertices if the {@link IterationConfigurationion#setOptNumVertices(boolean)}
+	 * @return the number of vertices if the {@link org.apache.flink.graph.IterationConfiguration#setOptNumVertices(boolean)}
 	 * option has been set; -1 otherwise.
 	 */
 	public long getNumberOfVertices() {
@@ -69,15 +69,11 @@ public abstract class SumFunction<VV, EV, M> implements Serializable {
 
 	/**
 	 * This method is executed once per superstep before the vertex update function is invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the pre-superstep phase cause the superstep to fail.
 	 */
 	public void preSuperstep() {}
 
 	/**
 	 * This method is executed once per superstep after the vertex update function has been invoked for each vertex.
-	 *
-	 * @throws Exception Exceptions in the post-superstep phase cause the superstep to fail.
 	 */
 	public void postSuperstep() {}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/ConnectedComponents.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/ConnectedComponents.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/ConnectedComponents.java
index 745c103..a190cc5 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/ConnectedComponents.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/ConnectedComponents.java
@@ -41,7 +41,7 @@ import org.apache.flink.types.NullValue;
  * 
  * The result is a DataSet of vertices, where the vertex value corresponds to the assigned component ID.
  * 
- * @see {@link org.apache.flink.graph.library.GSAConnectedComponents}
+ * @see org.apache.flink.graph.library.GSAConnectedComponents
  */
 @SuppressWarnings("serial")
 public class ConnectedComponents<K, EV> implements GraphAlgorithm<K, Long, EV, DataSet<Vertex<K, Long>>> {
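
A minimal invocation sketch for the algorithm above. The Integer iteration-bound
constructor is assumed; the graph itself (vertices pre-initialized with unique Long
IDs, as required) is elided:

    import org.apache.flink.api.java.DataSet;
    import org.apache.flink.graph.Graph;
    import org.apache.flink.graph.Vertex;
    import org.apache.flink.graph.library.ConnectedComponents;
    import org.apache.flink.types.NullValue;

    Graph<Long, Long, NullValue> graph = ...; // vertex values hold unique Long IDs

    // In the result, each vertex value is the assigned component ID.
    DataSet<Vertex<Long, Long>> components =
            graph.run(new ConnectedComponents<Long, NullValue>(10));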

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAConnectedComponents.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAConnectedComponents.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAConnectedComponents.java
index 4269517..a44ba14 100755
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAConnectedComponents.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAConnectedComponents.java
@@ -34,7 +34,7 @@ import org.apache.flink.types.NullValue;
  * This implementation assumes that the vertices of the input Graph are initialized with unique, Long component IDs.
  * The result is a DataSet of vertices, where the vertex value corresponds to the assigned component ID.
  * 
- * @see {@link org.apache.flink.graph.library.ConnectedComponents}
+ * @see org.apache.flink.graph.library.ConnectedComponents
  */
 public class GSAConnectedComponents<K, EV> implements GraphAlgorithm<K, Long, EV, DataSet<Vertex<K, Long>>> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAPageRank.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAPageRank.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAPageRank.java
index 43a5e5c..99624ca 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAPageRank.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/GSAPageRank.java
@@ -61,7 +61,7 @@ public class GSAPageRank<K> implements GraphAlgorithm<K, Double, Double, DataSet
 	/**
 	 * Creates an instance of the GSA PageRank algorithm.
 	 * If the number of vertices of the input graph is known,
-	 * use the {@link GSAPageRank#GSAPageRank(double, long)} constructor instead.
+	 * use the {@link GSAPageRank#GSAPageRank(double, int)} constructor instead.
 	 * 
 	 * The implementation assumes that each page has at least one incoming and one outgoing link.
 	 * 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/PageRank.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/PageRank.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/PageRank.java
index dfbe14b..935058d 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/PageRank.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/PageRank.java
@@ -62,7 +62,7 @@ public class PageRank<K> implements GraphAlgorithm<K, Double, Double, DataSet<Ve
 	/**
 	 * Creates an instance of the PageRank algorithm.
 	 * If the number of vertices of the input graph is known,
-	 * use the {@link PageRank#PageRank(double, long)} constructor instead.
+	 * use the {@link PageRank#PageRank(double, int)} constructor instead.
 	 * 
 	 * The implementation assumes that each page has at least one incoming and one outgoing link.
 	 * 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/MessagingFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/MessagingFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/MessagingFunction.java
index 271db86..f29fc9d 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/MessagingFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/MessagingFunction.java
@@ -53,7 +53,7 @@ public abstract class MessagingFunction<K, VV, Message, EV> implements Serializa
 
 	/**
 	 * Retrieves the number of vertices in the graph.
-	 * @return the number of vertices if the {@link IterationConfiguration#setOptNumVertices(boolean)}
+	 * @return the number of vertices if the {@link org.apache.flink.graph.IterationConfiguration#setOptNumVertices(boolean)}
 	 * option has been set; -1 otherwise.
 	 */
 	public long getNumberOfVertices() {
@@ -314,8 +314,7 @@ public abstract class MessagingFunction<K, VV, Message, EV> implements Serializa
 
 	/**
 	 * Retrieves the vertex in-degree (number of in-coming edges).
-	 * @return The in-degree of this vertex if the {@link IterationConfiguration#setOptDegrees(boolean)}
-	 * option has been set; -1 otherwise. 
+	 * @return The in-degree of this vertex
 	 */
 	public long getInDegree() {
 		return inDegree;
@@ -327,8 +326,7 @@ public abstract class MessagingFunction<K, VV, Message, EV> implements Serializa
 
 	/**
 	 * Retrieve the vertex out-degree (number of out-going edges).
-	 * @return The out-degree of this vertex if the {@link IterationConfiguration#setOptDegrees(boolean)}
-	 * option has been set; -1 otherwise. 
+	 * @return The out-degree of this vertex
 	 */
 	public long getOutDegree() {
 		return outDegree;
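
For context, a sketch of a concrete messaging function for this class.
sendMessageToAllNeighbors is assumed from this class's API; the propagation logic is
illustrative:

    import org.apache.flink.graph.Vertex;
    import org.apache.flink.graph.spargel.MessagingFunction;
    import org.apache.flink.types.NullValue;

    // Propagate the current (candidate minimum) vertex value to all neighbors.
    public class PropagateMinValue extends MessagingFunction<Long, Long, Long, NullValue> {

        @Override
        public void sendMessages(Vertex<Long, Long> vertex) {
            sendMessageToAllNeighbors(vertex.getValue());
        }
    }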

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexCentricIteration.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexCentricIteration.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexCentricIteration.java
index fdc39ff..be48f9c 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexCentricIteration.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexCentricIteration.java
@@ -46,7 +46,7 @@ import com.google.common.base.Preconditions;
 
 /**
  * This class represents iterative graph computations, programmed in a vertex-centric perspective.
- * It is a special case of <i>Bulk Synchronous Parallel<i> computation. The paradigm has also been
+ * It is a special case of <i>Bulk Synchronous Parallel</i> computation. The paradigm has also been
  * implemented by Google's <i>Pregel</i> system and by <i>Apache Giraph</i>.
  * <p>
  * Vertex centric algorithms operate on graphs, which are defined through vertices and edges. The 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexUpdateFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexUpdateFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexUpdateFunction.java
index 248925b..857cef5 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexUpdateFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/spargel/VertexUpdateFunction.java
@@ -30,12 +30,12 @@ import org.apache.flink.util.Collector;
 
 /**
  * This class must be extended by functions that compute the state of the vertex depending on the old state and the
- * incoming messages. The central method is {@link #updateVertex(Comparable, Object, MessageIterator)}, which is
+ * incoming messages. The central method is {@link #updateVertex(Vertex, MessageIterator)}, which is
  * invoked once per vertex per superstep.
  * 
- * <K> The vertex key type.
- * <VV> The vertex value type.
- * <Message> The message type.
+ * {@code <K>} The vertex key type.
+ * {@code <VV>} The vertex value type.
+ * {@code <Message>} The message type.
  */
 public abstract class VertexUpdateFunction<K, VV, Message> implements Serializable {
 
@@ -50,7 +50,7 @@ public abstract class VertexUpdateFunction<K, VV, Message> implements Serializab
 
 	/**
 	 * Retrieves the number of vertices in the graph.
-	 * @return the number of vertices if the {@link IterationConfiguration#setOptNumVertices(boolean)}
+	 * @return the number of vertices if the {@link org.apache.flink.graph.IterationConfiguration#setOptNumVertices(boolean)}
 	 * option has been set; -1 otherwise.
 	 */
 	public long getNumberOfVertices() {
@@ -206,8 +206,7 @@ public abstract class VertexUpdateFunction<K, VV, Message> implements Serializab
 
 	/**
 	 * Retrieves the vertex in-degree (number of in-coming edges).
-	 * @return The in-degree of this vertex if the {@link IterationConfiguration#setOptDegrees(boolean)}
-	 * option has been set; -1 otherwise. 
+	 * @return The in-degree of this vertex
 	 */
 	public long getInDegree() {
 		return inDegree;
@@ -219,8 +218,7 @@ public abstract class VertexUpdateFunction<K, VV, Message> implements Serializab
 
 	/**
 	 * Retrieve the vertex out-degree (number of out-going edges).
-	 * @return The out-degree of this vertex if the {@link IterationConfiguration#setOptDegrees(boolean)}
-	 * option has been set; -1 otherwise. 
+	 * @return The out-degree of this vertex
 	 */
 	public long getOutDegree() {
 		return outDegree;
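
A matching sketch for the update side: updateVertex(Vertex, MessageIterator) is the
central method named above, while setNewVertexValue is assumed from this class's API;
the min-value logic is illustrative:

    import org.apache.flink.graph.Vertex;
    import org.apache.flink.graph.spargel.MessageIterator;
    import org.apache.flink.graph.spargel.VertexUpdateFunction;

    // Adopt the smallest incoming message; only changed values trigger new messages.
    public class MinValueUpdate extends VertexUpdateFunction<Long, Long, Long> {

        @Override
        public void updateVertex(Vertex<Long, Long> vertex, MessageIterator<Long> messages) {
            long min = vertex.getValue();
            for (long msg : messages) {
                min = Math.min(min, msg);
            }
            if (min < vertex.getValue()) {
                setNewVertexValue(min);
            }
        }
    }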

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/OptimizerNode.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/OptimizerNode.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/OptimizerNode.java
index 490c304..a3bf995 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/OptimizerNode.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/OptimizerNode.java
@@ -50,7 +50,7 @@ import org.apache.flink.util.Visitor;
  * The OptimizerNode is the base class of all nodes in the optimizer DAG. The optimizer DAG is the
  * optimizer's representation of a program, created before the actual optimization (which creates different
  * candidate plans and computes their cost).
- * <p>>
+ * <p>
  * Nodes in the DAG correspond (almost) one-to-one to the operators in a program. The optimizer DAG is constructed
  * to hold the additional information that the optimizer needs:
  * <ul>

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-optimizer/src/main/java/org/apache/flink/optimizer/operators/OperatorDescriptorSingle.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/operators/OperatorDescriptorSingle.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/operators/OperatorDescriptorSingle.java
index c8be5d4..ffa317c 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/operators/OperatorDescriptorSingle.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/operators/OperatorDescriptorSingle.java
@@ -35,7 +35,7 @@ import org.apache.flink.optimizer.plan.SingleInputPlanNode;
  * Abstract base class for Operator descriptions which instantiates the node and sets the driver
  * strategy and the sorting and grouping keys. Returns possible local and global properties and
  * updates them after the operation has been performed.
- * @see org.apache.flink.compiler.dag.SingleInputNode
+ * @see org.apache.flink.optimizer.dag.SingleInputNode
  */
 public abstract class OperatorDescriptorSingle implements AbstractOperatorDescriptor {
 	

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-optimizer/src/main/java/org/apache/flink/optimizer/plan/Channel.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plan/Channel.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plan/Channel.java
index b139b62..a4448c6 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plan/Channel.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plan/Channel.java
@@ -186,7 +186,6 @@ public class Channel implements EstimateProvider, Cloneable, DumpableConnection<
 	 * Sets the data exchange mode (batch / pipelined) to use for the data
 	 * exchange of this channel.
 	 *
-	 * @return The data exchange mode of this channel.
 	 */
 	public void setDataExchangeMode(DataExchangeMode dataExchangeMode) {
 		this.dataExchangeMode = checkNotNull(dataExchangeMode);

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-optimizer/src/main/java/org/apache/flink/optimizer/postpass/SparseKeySchema.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/postpass/SparseKeySchema.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/postpass/SparseKeySchema.java
index e14888e..1545d6f 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/postpass/SparseKeySchema.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/postpass/SparseKeySchema.java
@@ -26,7 +26,7 @@ import java.util.Map.Entry;
 import org.apache.flink.types.Key;
 
 /**
- * Class encapsulating a schema map (int column position -> column type) and a reference counter.
+ * Class encapsulating a schema map (int column position -&gt; column type) and a reference counter.
  */
 public class SparseKeySchema extends AbstractSchema<Class<? extends Key<?>>> {
 	

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobService.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobService.java b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobService.java
index a2400b5..e6ea8d3 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobService.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobService.java
@@ -52,7 +52,6 @@ public interface BlobService {
 
 	/**
 	 * Shutdown method which is called to terminate the blob service.
-	 * @throws IOException
 	 */
 	void shutdown();
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java
index 81159f6..19c65d4 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java
@@ -149,7 +149,6 @@ public class PendingCheckpoint {
 	
 	/**
 	 * Discards the pending checkpoint, releasing all held resources.
-	 * @throws Exception 
 	 */
 	public void discard(ClassLoader userClassLoader, boolean discardStateHandle) {
 		synchronized (lock) {

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java b/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
index 9f4a5a7..e3e3256 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
@@ -22,7 +22,7 @@ package org.apache.flink.runtime.execution;
  * An enumeration of all states that a task can be in during its execution.
  * Tasks usually start in the state {@code CREATED} and switch states according to
  * this diagram:
- * <pre>
+ * <pre>{@code
  *
  *     CREATED  -> SCHEDULED -> DEPLOYING -> RUNNING -> FINISHED
  *                     |            |          |
@@ -33,7 +33,7 @@ package org.apache.flink.runtime.execution;
  *                     +-------------------------+
  *
  *                                               ... -> FAILED
- * </pre>
+ * }</pre>
  *
  * <p>It is possible to enter the {@code FAILED} state from any other state.</p>
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java b/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
index fa5ad85..1b8b1d9 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
@@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
 /**
  * The FileCache is used to create the local files for the registered cache files when a task is deployed.
  * The files will be removed when the task is unregistered after a 5 second delay.
- * A given file x will be placed in "<system-tmp-dir>/tmp_<jobID>/".
+ * A given file x will be placed in "{@code <system-tmp-dir>/tmp_<jobID>/}".
  */
 public class FileCache {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotSharingGroupAssignment.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotSharingGroupAssignment.java b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotSharingGroupAssignment.java
index 94249de..7d666fe 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotSharingGroupAssignment.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotSharingGroupAssignment.java
@@ -57,7 +57,7 @@ import org.slf4j.LoggerFactory;
  * 
  * <p>Consider a job set up like this:</p>
  * 
- * <pre>
+ * <pre>{@code
  * +-------------- Slot Sharing Group --------------+
  * |                                                |
  * |            +-- Co Location Group --+           |
@@ -66,7 +66,7 @@ import org.slf4j.LoggerFactory;
  * |            |                       |           |
  * |            +-----------------------+           |
  * +------------------------------------------------+
- * </pre>
+ * }</pre>
  * 
  * <p>The slot hierarchy in the slot sharing group will look like the following</p> 
  * 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelReader.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelReader.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelReader.java
index 957052e..8e33cda 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelReader.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelReader.java
@@ -46,7 +46,7 @@ public interface BlockChannelReader<T> extends FileIOChannel {
 	 * such a segment is available, or until an error occurs in the reader, or the reader is closed.
 	 * <p>
 	 * WARNING: If this method is invoked without any segment ever returning (for example, because the
-	 * {@link #readBlock(T)} method has not been invoked appropriately), the method may block
+	 * {@link #readBlock} method has not been invoked appropriately), the method may block
 	 * forever.
 	 * 
 	 * @return The next memory segment from the reader's return queue.
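
A schematic read loop for the contract described above (the reader and the free
segments are assumed to exist; exception handling elided). The key point is that each
getNextReturnedBlock() call must be matched by an earlier readBlock(...) request:

    import java.util.List;
    import org.apache.flink.core.memory.MemorySegment;
    import org.apache.flink.runtime.io.disk.iomanager.BlockChannelReader;

    BlockChannelReader<MemorySegment> reader = ...; // e.g. obtained from the IOManager
    List<MemorySegment> freeSegments = ...;         // empty segments to read into

    // First, issue a read request for every free segment ...
    for (MemorySegment segment : freeSegments) {
        reader.readBlock(segment);
    }
    // ... then collect the filled segments; each call below pairs with one request
    // above. Without a pending readBlock(...), this call could block forever.
    for (int i = 0; i < freeSegments.size(); i++) {
        MemorySegment filled = reader.getNextReturnedBlock();
        // process 'filled', then re-issue reader.readBlock(filled) if more blocks remain
    }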

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelWriter.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelWriter.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelWriter.java
index ccf065a..d6cfd7e 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelWriter.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BlockChannelWriter.java
@@ -35,7 +35,7 @@ public interface BlockChannelWriter<T> extends BlockChannelWriterWithCallback<T>
 	 * writer is closed.
 	 * <p>
 	 * NOTE: If this method is invoked without any segment ever returning (for example, because the
-	 * {@link #writeBlock(T)} method has not been invoked accordingly), the method may block
+	 * {@link #writeBlock} method has not been invoked accordingly), the method may block
 	 * forever.
 	 * 
 	 * @return The next memory segment from the writers's return queue.

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
index f18c7e4..d49b27e 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
@@ -32,17 +32,17 @@ import java.io.IOException;
  * <p> As an example, consider a map-reduce program, where the map operator produces data and the
  * reduce operator consumes the produced data.
  *
- * <pre>
+ * <pre>{@code
  * +-----+              +---------------------+              +--------+
  * | Map | = produce => | Intermediate Result | <= consume = | Reduce |
  * +-----+              +---------------------+              +--------+
- * </pre>
+ * }</pre>
  *
  * <p> When deploying such a program in parallel, the intermediate result will be partitioned over its
  * producing parallel subtasks; each of these partitions is furthermore partitioned into one or more
  * subpartitions.
  *
- * <pre>
+ * <pre>{@code
  *                            Intermediate result
  *               +-----------------------------------------+
  *               |                      +----------------+ |              +-----------------------+
@@ -57,7 +57,7 @@ import java.io.IOException;
  * +-------+     | +-------------+  +=> | Subpartition 2 | | <==+======== | Input Gate | Reduce 2 |
  *               |                      +----------------+ |              +-----------------------+
  *               +-----------------------------------------+
- * </pre>
+ * }</pre>
  *
  * <p> In the above example, two map subtasks produce the intermediate result in parallel, resulting
  * in two partitions (Partition 1 and 2). Each of these partitions is further partitioned into two

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGate.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGate.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGate.java
index 896fa9c..47f9228 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGate.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGate.java
@@ -66,17 +66,17 @@ import static com.google.common.base.Preconditions.checkState;
  * <p> As an example, consider a map-reduce program, where the map operator produces data and the
  * reduce operator consumes the produced data.
  *
- * <pre>
+ * <pre>{@code
  * +-----+              +---------------------+              +--------+
  * | Map | = produce => | Intermediate Result | <= consume = | Reduce |
  * +-----+              +---------------------+              +--------+
- * </pre>
+ * }</pre>
  *
  * <p> When deploying such a program in parallel, the intermediate result will be partitioned over its
  * producing parallel subtasks; each of these partitions is furthermore partitioned into one or more
  * subpartitions.
  *
- * <pre>
+ * <pre>{@code
  *                            Intermediate result
  *               +-----------------------------------------+
  *               |                      +----------------+ |              +-----------------------+
@@ -91,7 +91,7 @@ import static com.google.common.base.Preconditions.checkState;
  * +-------+     | +-------------+  +=> | Subpartition 2 | | <==+======== | Input Gate | Reduce 2 |
  *               |                      +----------------+ |              +-----------------------+
  *               +-----------------------------------------+
- * </pre>
+ * }</pre>
  *
  * <p> In the above example, two map subtasks produce the intermediate result in parallel, resulting
  * in two partitions (Partition 1 and 2). Each of these partitions is further partitioned into two

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
index 0d266c2..6836237 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 
 /**
- * An intermediate iteration task, which runs a {@link org.apache.flink.runtime.operators.PactDriver} inside.
+ * An intermediate iteration task, which runs a {@link org.apache.flink.runtime.operators.Driver} inside.
  * <p>
 * It will propagate {@link EndOfSuperstepEvent}s and {@link TerminationEvent}s to its connected tasks. Furthermore,
  * intermediate tasks can also update the iteration state, either the workset or the solution set.

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Scheduler.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Scheduler.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Scheduler.java
index cb99e52..963fc4c 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Scheduler.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Scheduler.java
@@ -57,7 +57,7 @@ import scala.concurrent.ExecutionContext;
  * <p>The scheduler supports two scheduling modes:</p>
  * <ul>
  *     <li>Immediate scheduling: A request for a task slot immediately returns a task slot, if one is
- *         available, or throws a {@link NoResourceAvailableException}</li>.
+ *         available, or throws a {@link NoResourceAvailableException}.</li>
  *     <li>Queued Scheduling: A request for a task slot is queued and returns a future that will be
  *         fulfilled as soon as a slot becomes available.</li>
  * </ul>
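
A hedged sketch of the two modes (class and method names are assumptions based on this version's scheduler; `vertex` is an illustrative unit of work):

    ScheduledUnit task = new ScheduledUnit(vertex);

    // Immediate scheduling: a slot now, or a NoResourceAvailableException.
    SimpleSlot slot = scheduler.scheduleImmediately(task);

    // Queued scheduling: a future that is fulfilled once a slot frees up.
    SlotAllocationFuture future = scheduler.scheduleQueued(task);
    future.setFutureAction(new SlotAllocationFutureAction() {
        @Override
        public void slotAllocated(SimpleSlot allocated) {
            // deploy the task into the allocated slot
        }
    });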

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeInnerJoinIterator.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeInnerJoinIterator.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeInnerJoinIterator.java
index db56227..1b54f20 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeInnerJoinIterator.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeInnerJoinIterator.java
@@ -53,7 +53,7 @@ public abstract class AbstractMergeInnerJoinIterator<T1, T2, O> extends Abstract
 	/**
 	 * Calls the <code>JoinFunction#join()</code> method for all pairs of values that share the same key and come
 	 * from different inputs. The output of the <code>join()</code> method is forwarded.
-	 * <p/>
+	 * <p>
 	 * This method first zig-zags between the two sorted inputs in order to find a common
 	 * key, and then calls the join stub with the cross product of the values.
 	 *
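
The zig-zag step can be pictured with this illustrative sketch (helpers such as advanceToNextKey and valuesForKey are hypothetical, not the iterator's actual members):

    // Advance whichever input is behind until both keys match, then emit the
    // cross product of the two matching value groups.
    while (haveKey1 && haveKey2) {
        int cmp = comparator.compare(key1, key2);
        if (cmp < 0) {
            haveKey1 = advanceToNextKey(input1);   // input1 is behind
        } else if (cmp > 0) {
            haveKey2 = advanceToNextKey(input2);   // input2 is behind
        } else {
            for (T1 v1 : valuesForKey(input1)) {
                for (T2 v2 : valuesForKey(input2)) {
                    collector.collect(joinFunction.join(v1, v2));
                }
            }
            haveKey1 = advanceToNextKey(input1);
            haveKey2 = advanceToNextKey(input2);
        }
    }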

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
index 947a56b..0ad3c6d 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p/>
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -34,11 +34,11 @@ import static com.google.common.base.Preconditions.checkArgument;
 * During the creation of a bloom filter, the expected number of entries must be specified. If the number
 * of insertions exceeds the specified initial number of entries, then the false positive probability will
 * increase accordingly.
- * <p/>
+ * <p>
 * Internally, this implementation of bloom filter uses a MemorySegment to store its BitSet. BloomFilter and
  * BitSet are designed to be able to switch between different MemorySegments, so that Flink can share
  * the same BloomFilter/BitSet object instance for different bloom filters.
- * <p/>
+ * <p>
  * Part of this class refers to the implementation from Apache Hive project
  * https://github.com/apache/hive/blob/master/common/src/java/org/apache/hive/common/util/BloomFilter.java
  */
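
The sizing trade-off mentioned above follows the standard Bloom filter formulas; a small sketch (textbook math, similar in spirit to this class's helpers, not necessarily its exact code):

    // For n expected entries and target false positive rate p:
    //   bits   m = ceil(-n * ln(p) / (ln 2)^2)
    //   hashes k = max(1, round((m / n) * ln 2))
    static int optimalNumOfBits(long expectedEntries, double fpp) {
        return (int) Math.ceil(-expectedEntries * Math.log(fpp) / (Math.log(2) * Math.log(2)));
    }
    static int optimalNumOfHashFunctions(long expectedEntries, long bitSize) {
        return Math.max(1, (int) Math.round((double) bitSize / expectedEntries * Math.log(2)));
    }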

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java b/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
index 181d6d6..a7a3458 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
@@ -113,7 +113,7 @@ public final class WebMonitorUtils {
 	/**
 	 * Starts the web runtime monitor. Because the actual implementation of the runtime monitor is
 	 * in another project, we load the runtime monitor dynamically.
-	 * <p/>
+	 * <p>
 	 * Because failure to start the web runtime monitor is not considered fatal, this method does
 	 * not throw any exceptions, but only logs them.
 	 *
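
A hedged sketch of the dynamic-loading pattern this describes (the monitor class name and constructor signature are assumptions for illustration; LOG and config are assumed fields):

    try {
        Class<?> clazz = Class.forName("org.apache.flink.runtime.webmonitor.WebRuntimeMonitor");
        Constructor<?> ctor = clazz.getConstructor(Configuration.class);
        WebMonitor monitor = (WebMonitor) ctor.newInstance(config);
        monitor.start();
    } catch (ClassNotFoundException e) {
        LOG.error("Web runtime monitor classes not found on the classpath", e);
    } catch (Exception e) {
        LOG.error("Could not start the web runtime monitor", e); // non-fatal by design
    }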

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/yarn/AbstractFlinkYarnCluster.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/yarn/AbstractFlinkYarnCluster.java b/flink-runtime/src/main/java/org/apache/flink/runtime/yarn/AbstractFlinkYarnCluster.java
index 3f78898..d7ef904 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/yarn/AbstractFlinkYarnCluster.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/yarn/AbstractFlinkYarnCluster.java
@@ -100,7 +100,9 @@ public abstract class AbstractFlinkYarnCluster {
 
 	/**
 	 * Disconnect from the ApplicationMaster without stopping the session
-	 * (therefore, use the {@see shutdown()} method.
+	 * (to stop the session as well, use the {@link AbstractFlinkYarnCluster#shutdown(boolean)} method).
+	 *
+	 * @see AbstractFlinkYarnCluster#shutdown(boolean)
 	 */
 	public abstract void disconnect();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/StateStorageHelper.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/StateStorageHelper.java b/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/StateStorageHelper.java
index d18cace..36fb849 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/StateStorageHelper.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/StateStorageHelper.java
@@ -23,7 +23,7 @@ import org.apache.flink.runtime.state.StateHandle;
 import java.io.Serializable;
 
 /**
- * State storage helper which is used by {@ZooKeeperStateHandleStore} to persiste state before
+ * State storage helper which is used by {@link ZooKeeperStateHandleStore} to persist state before
  * the state handle is written to ZooKeeper.
  *
  * @param <T>

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java b/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
index 6073a39..1d89d64 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
@@ -50,13 +50,13 @@ import static com.google.common.base.Preconditions.checkNotNull;
  * <p>ZooKeeper holds the ground truth about state handles, i.e. the following holds:
  *
  * <pre>
- * State handle in ZooKeeper => State handle exists
+ * State handle in ZooKeeper =&gt; State handle exists
  * </pre>
  *
  * But not:
  *
  * <pre>
- * State handle exists => State handle in ZooKeeper
+ * State handle exists =&gt; State handle in ZooKeeper
  * </pre>
  *
  * There can be lingering state handles when failures happen during operation. They
@@ -316,7 +316,7 @@ public class ZooKeeperStateHandleStore<T extends Serializable> {
 	/**
 	 * Removes a state handle from ZooKeeper.
 	 *
-	 * <p><stong>Important</stong>: this does not discard the state handle. If you want to
+	 * <p><strong>Important</strong>: this does not discard the state handle. If you want to
 	 * discard the state handle call {@link #removeAndDiscardState(String)}.
 	 *
 	 * @param pathInZooKeeper Path of state handle to remove (expected to start with a '/')
@@ -331,7 +331,7 @@ public class ZooKeeperStateHandleStore<T extends Serializable> {
 	/**
 	 * Removes a state handle from ZooKeeper asynchronously.
 	 *
-	 * <p><stong>Important</stong>: this does not discard the state handle. If you want to
+	 * <p><strong>Important</strong>: this does not discard the state handle. If you want to
 	 * discard the state handle call {@link #removeAndDiscardState(String)}.
 	 *
 	 * @param pathInZooKeeper Path of state handle to remove (expected to start with a '/')
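
To make the documented distinction concrete, a hedged usage sketch (paths are illustrative):

    store.remove("/checkpoints/cp-42");                // deletes the ZNode only;
                                                       // the serialized state survives
    store.removeAndDiscardState("/checkpoints/cp-43"); // deletes the ZNode and
                                                       // discards the state handle's data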

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
----------------------------------------------------------------------
diff --git a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
index 4d81daf..83ab23d 100644
--- a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
+++ b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
@@ -111,7 +111,7 @@ public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	
 	/**
 	 * Custom serialization methods.
-	 *  @see http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html
+	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
 	 */
 	private void writeObject(final ObjectOutputStream out) throws IOException {
 		out.writeObject(mapper.getClass());
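
The referenced Serializable contract boils down to this generic pattern (a sketch, not the full Flink implementation; both methods must be private with exactly these signatures to be picked up by Java serialization):

    private void writeObject(ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        // manually write members that Java serialization cannot handle,
        // e.g. Hadoop Writable instances or a JobConf
    }

    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        // restore the members written above, in the same order
    }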

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
----------------------------------------------------------------------
diff --git a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
index f00859f..97b9768 100644
--- a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
+++ b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
@@ -138,7 +138,7 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	/**
 	 * Custom serialization methods.
-	 *  @see http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html
+	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
 	 */
 	private void writeObject(final ObjectOutputStream out) throws IOException {
 		

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
----------------------------------------------------------------------
diff --git a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
index 6943421..1c47696 100644
--- a/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
+++ b/flink-staging/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
@@ -118,8 +118,8 @@ public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	}
 
 	/**
-	 * Custom serialization methods.
-	 *  @see http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html
+	 * Custom serialization methods.
+	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
 	 */
 	private void writeObject(final ObjectOutputStream out) throws IOException {
 		

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-staging/flink-scala-shell/src/main/java/org/apache/flink/api/java/JarHelper.java
----------------------------------------------------------------------
diff --git a/flink-staging/flink-scala-shell/src/main/java/org/apache/flink/api/java/JarHelper.java b/flink-staging/flink-scala-shell/src/main/java/org/apache/flink/api/java/JarHelper.java
index e085c25..83fb342 100644
--- a/flink-staging/flink-scala-shell/src/main/java/org/apache/flink/api/java/JarHelper.java
+++ b/flink-staging/flink-scala-shell/src/main/java/org/apache/flink/api/java/JarHelper.java
@@ -37,7 +37,7 @@ import java.util.jar.JarInputStream;
  *
  * Copied from http://grepcode.com/file_/repo1.maven.org/maven2/org.apache.xmlbeans/xmlbeans/2.4.0/org/apache/xmlbeans/impl/common/JarHelper.java/?v=source
  *
- * @author Patrick Calahan <pc...@bea.com>
+ * @author Patrick Calahan <a href="mailto:pcal@bea.com">pcal@bea.com</a>
  */
 public class JarHelper
 {

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-staging/flink-table/src/main/java/org/apache/flink/api/java/table/package-info.java
----------------------------------------------------------------------
diff --git a/flink-staging/flink-table/src/main/java/org/apache/flink/api/java/table/package-info.java b/flink-staging/flink-table/src/main/java/org/apache/flink/api/java/table/package-info.java
index 6f9f0a3..97113bb 100644
--- a/flink-staging/flink-table/src/main/java/org/apache/flink/api/java/table/package-info.java
+++ b/flink-staging/flink-table/src/main/java/org/apache/flink/api/java/table/package-info.java
@@ -31,7 +31,7 @@
  * <p>
  * Example:
  *
- * <code>
+ * <pre>{@code
  * ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
  *
  * DataSet<WC> input = env.fromElements(
@@ -50,11 +50,11 @@
  *
  * result.print();
  * env.execute();
- * </code>
+ * }</pre>
  *
  * <p>
  * As seen above, a {@link org.apache.flink.api.table.Table} can be converted back to the
- * underlying API representation using {@link org.apache.flink.api.java.table.TableEnvironment.toSet}
- * or {@link org.apache.flink.api.java.table.TableEnvironment.toStream}.
+ * underlying API representation using {@link org.apache.flink.api.java.table.TableEnvironment#toDataSet(org.apache.flink.api.table.Table, java.lang.Class)}
+ * or {@link org.apache.flink.api.java.table.TableEnvironment#toDataStream(org.apache.flink.api.table.Table, java.lang.Class)}.
  */
 package org.apache.flink.api.java.table;
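
A hedged usage sketch of the conversion methods linked above (WC is the example POJO from this package documentation; fromDataSet is assumed to be the matching inbound converter):

    Table table = tableEnv.fromDataSet(input);
    DataSet<WC> result = tableEnv.toDataSet(table, WC.class);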

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer.java
----------------------------------------------------------------------
diff --git a/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer.java b/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer.java
index 8066b3c..c7ae1cc 100644
--- a/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer.java
+++ b/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer.java
@@ -79,7 +79,7 @@ import static com.google.common.base.Preconditions.checkNotNull;
  * <ul>
  *     <li>{@link FetcherType#NEW_HIGH_LEVEL}: A fetcher based on the new Kafka consumer API.
  *         This fetcher is generally more robust, but works only with later versions of
- *         Kafka (> 0.8.2).</li>
+ *         Kafka (&gt; 0.8.2).</li>
  *         
  *     <li>{@link FetcherType#LEGACY_LOW_LEVEL}: A fetcher based on the old low-level consumer API.
 *         This fetcher also works with older versions of Kafka (0.8.1). The fetcher interprets

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FixedPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FixedPartitioner.java b/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FixedPartitioner.java
index 346a7d5..7ab7290 100644
--- a/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FixedPartitioner.java
+++ b/flink-streaming-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FixedPartitioner.java
@@ -28,24 +28,24 @@ import java.io.Serializable;
  * 	# More Flink partitions than kafka partitions
  * <pre>
  * 		Flink Sinks:		Kafka Partitions
- * 			1	---------------->	1
+ * 			1	----------------&gt;	1
  * 			2   --------------/
  * 			3   -------------/
  * 			4	------------/
  * </pre>
- * 	--> Some (or all) kafka partitions contain the output of more than one flink partition
+ * Some (or all) Kafka partitions contain the output of more than one Flink partition.
  *
 * 	# Fewer Flink partitions than Kafka partitions
  * <pre>
  * 		Flink Sinks:		Kafka Partitions
- * 			1	---------------->	1
- * 			2	---------------->	2
- * 									3
- * 									4
- * 									5
+ * 			1	----------------&gt;	1
+ * 			2	----------------&gt;	2
+ * 									3
+ * 									4
+ * 									5
  * </pre>
  *
- *  --> Not all Kafka partitions contain data
+ *  Not all Kafka partitions contain data.
 *  To avoid such an unbalanced partitioning, use a round-robin Kafka partitioner. (Note that this will
 *  cause a lot of network connections between all the Flink instances and all the Kafka brokers.)
  *
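
A hedged sketch of the fixed assignment this partitioner implements (the signature is illustrative; the real class may cache the chosen target partition once at open time):

    // Each parallel sink instance always writes to the same Kafka partition.
    int targetPartition(int parallelInstanceId, int[] partitions) {
        return partitions[parallelInstanceId % partitions.length];
    }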

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java
index 2cf66b9..52ec896 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java
@@ -32,10 +32,11 @@ import java.util.List;
 import java.util.Random;
 
 /**
- * Example illustrating iterations in Flink streaming. <p/> <p> The program sums up random numbers and counts additions
+ * Example illustrating iterations in Flink streaming.
+ * <p> The program sums up random numbers and counts additions
  * it performs to reach a specific threshold in an iterative streaming fashion. </p>
- * <p/>
- * <p/>
+ *
+ * <p>
  * This example shows how to use: <ul> <li>streaming iterations, <li>buffer timeout to enhance latency, <li>directed
  * outputs. </ul>
  */
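
A hedged skeleton of such a streaming iteration (DataStream API of this version; addRandom and selector are illustrative user functions):

    env.setBufferTimeout(1);                              // flush buffers early for low latency
    IterativeStream<Tuple2<Integer, Integer>> it = inputStream.iterate(5000L);
    SplitStream<Tuple2<Integer, Integer>> step = it.map(addRandom).split(selector);
    it.closeWith(step.select("iterate"));                 // feed unfinished elements back
    DataStream<Tuple2<Integer, Integer>> output = step.select("output");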

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/ml/IncrementalLearningSkeleton.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/ml/IncrementalLearningSkeleton.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/ml/IncrementalLearningSkeleton.java
index ce227e4..32cf430 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/ml/IncrementalLearningSkeleton.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/ml/IncrementalLearningSkeleton.java
@@ -40,7 +40,7 @@ import java.util.concurrent.TimeUnit;
  * incremental Alternating Least Squares model while also providing the
  * predictions.
  *
- * <p/>
+ * <p>
  * This example shows how to use:
  * <ul>
  *   <li>Connected streams
@@ -188,7 +188,6 @@ public class IncrementalLearningSkeleton {
 	/**
 	 * Creates newData using the model produced in batch-processing and the
 	 * up-to-date partial model.
-	 * <p/>
 	 * <p>
 	 * By defaults emits the Integer 0 for every newData and the Integer 1
 	 * for every model update.

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/socket/SocketTextStreamWordCount.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/socket/SocketTextStreamWordCount.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/socket/SocketTextStreamWordCount.java
index 17add2c..cecabdd 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/socket/SocketTextStreamWordCount.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/socket/SocketTextStreamWordCount.java
@@ -26,19 +26,16 @@ import org.apache.flink.streaming.examples.wordcount.WordCount.Tokenizer;
  * This example shows an implementation of WordCount with data from a text
  * socket. To run the example make sure that the service providing the text data
  * is already up and running.
- * <p/>
- * <p/>
+ * <p>
  * To start an example socket text stream on your local machine run netcat from
  * a command line: <code>nc -lk 9999</code>, where the parameter specifies the
  * port number.
- * <p/>
- * <p/>
- * <p/>
+ * </p>
+ * <p>
  * Usage:
  * <code>SocketTextStreamWordCount &lt;hostname&gt; &lt;port&gt; &lt;result path&gt;</code>
- * <br>
- * <p/>
- * <p/>
+ * </p>
+ * <p>
  * This example shows how to:
  * <ul>
  * <li>use StreamExecutionEnvironment.socketTextStream

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/twitter/TwitterStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/twitter/TwitterStream.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/twitter/TwitterStream.java
index c2477b5..06872f0 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/twitter/TwitterStream.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/twitter/TwitterStream.java
@@ -31,16 +31,15 @@ import java.util.StringTokenizer;
 /**
 * Implements the "TwitterStream" program that computes the most-used word
 * occurrences over JSON files in a streaming fashion.
- * <p/>
- * <p/>
+ * <p>
  * The input is a JSON text file with lines separated by newline characters.
- * <p/>
- * <p/>
+ * </p>
+ * <p>
  * Usage: <code>TwitterStream &lt;text path&gt;</code><br>
  * If no parameters are provided, the program is run with default data from
  * {@link TwitterStreamData}.
- * <p/>
- * <p/>
+ * </p>
+ * <p>
  * This example shows how to:
  * <ul>
  * <li>acquire external data,
@@ -88,13 +87,11 @@ public class TwitterStream {
 
 	/**
 	 * Makes sentences from English tweets.
-	 * <p/>
 	 * <p>
 	 * Implements a string tokenizer that splits sentences into words as a
 	 * user-defined FlatMapFunction. The function takes a line (String) and
-	 * splits it into multiple pairs in the form of "(word,1)" (Tuple2<String,
-	 * Integer>).
-	 * </p>
+	 * splits it into multiple pairs in the form of "(word,1)" ({@code Tuple2<String,
+	 * Integer>}).
 	 */
 	public static class SelectEnglishAndTokenizeFlatMap extends JSONParseFlatMap<String, Tuple2<String, Integer>> {
 		private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/PojoExample.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/PojoExample.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/PojoExample.java
index 591ef51..9b0b63c 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/PojoExample.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/PojoExample.java
@@ -126,8 +126,8 @@ public class PojoExample {
 	/**
 	 * Implements the string tokenizer that splits sentences into words as a
 	 * user-defined FlatMapFunction. The function takes a line (String) and
-	 * splits it into multiple pairs in the form of "(word,1)" (Tuple2<String,
-	 * Integer>).
+	 * splits it into multiple pairs in the form of "(word,1)" ({@code Tuple2<String,
+	 * Integer>}).
 	 */
 	public static final class Tokenizer implements FlatMapFunction<String, Word> {
 		private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java
----------------------------------------------------------------------
diff --git a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java
index a594c94..a76671e 100644
--- a/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java
+++ b/flink-streaming-examples/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java
@@ -87,8 +87,8 @@ public class WordCount {
 	/**
 	 * Implements the string tokenizer that splits sentences into words as a
 	 * user-defined FlatMapFunction. The function takes a line (String) and
-	 * splits it into multiple pairs in the form of "(word,1)" (Tuple2<String,
-	 * Integer>).
+	 * splits it into multiple pairs in the form of "(word,1)" ({@code Tuple2<String,
+	 * Integer>}).
 	 */
 	public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
 		private static final long serialVersionUID = 1L;
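
A hedged sketch of the flatMap body this documents (normalization details may differ from the shipped example):

    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
        for (String token : value.toLowerCase().split("\\W+")) {
            if (token.length() > 0) {
                out.collect(new Tuple2<String, Integer>(token, 1)); // emit "(word, 1)"
            }
        }
    }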

http://git-wip-us.apache.org/repos/asf/flink/blob/680b5a90/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
index 176a07f..309eb05 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
@@ -598,7 +598,7 @@ public class DataStream<T> {
 	}
 
 	/**
-	 * Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br/>
+	 * Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br>
 	 * <b>Note: Only Tuple DataStreams can be projected.</b>
 	 *
 	 * <p>