Posted to commits@flink.apache.org by ch...@apache.org on 2017/05/28 06:17:31 UTC

[01/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-twitter

Repository: flink
Updated Branches:
  refs/heads/master 7355a59f4 -> 4f50dc4df


[FLINK-6711] Activate strict checkstyle for flink-connector-twitter


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/85910350
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/85910350
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/85910350

Branch: refs/heads/master
Commit: 85910350276236ebbb9741b66a081e53b0f15f5b
Parents: 7355a59
Author: zentol <ch...@apache.org>
Authored: Wed May 24 22:29:09 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:10:03 2017 +0200

----------------------------------------------------------------------
 .../kinesis/manualtests/ManualProducerTest.java |  2 +-
 .../flink-connector-twitter/pom.xml             |  1 -
 .../connectors/twitter/TwitterSource.java       | 33 +++++++++-----------
 3 files changed, 16 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/85910350/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
index 1df717c..81d0bec 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
@@ -66,7 +66,7 @@ public class ManualProducerTest {
 					// every 10th element goes into a different stream
 					@Override
 					public String getTargetStream(String element) {
-						if(element.split("-")[0].endsWith("0")) {
+						if (element.split("-")[0].endsWith("0")) {
 							return "flink-test-2";
 						}
 						return null; // send to default stream

http://git-wip-us.apache.org/repos/asf/flink/blob/85910350/flink-connectors/flink-connector-twitter/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-twitter/pom.xml b/flink-connectors/flink-connector-twitter/pom.xml
index 38bfb30..0f1e44a 100644
--- a/flink-connectors/flink-connector-twitter/pom.xml
+++ b/flink-connectors/flink-connector-twitter/pom.xml
@@ -39,7 +39,6 @@ under the License.
 		<hbc-core.version>2.2.0</hbc-core.version>
 	</properties>
 
-
 	<dependencies>
 		<dependency>
 			<groupId>org.apache.flink</groupId>

http://git-wip-us.apache.org/repos/asf/flink/blob/85910350/flink-connectors/flink-connector-twitter/src/main/java/org/apache/flink/streaming/connectors/twitter/TwitterSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-twitter/src/main/java/org/apache/flink/streaming/connectors/twitter/TwitterSource.java b/flink-connectors/flink-connector-twitter/src/main/java/org/apache/flink/streaming/connectors/twitter/TwitterSource.java
index 66fa237..b3b04a6 100644
--- a/flink-connectors/flink-connector-twitter/src/main/java/org/apache/flink/streaming/connectors/twitter/TwitterSource.java
+++ b/flink-connectors/flink-connector-twitter/src/main/java/org/apache/flink/streaming/connectors/twitter/TwitterSource.java
@@ -17,29 +17,29 @@
 
 package org.apache.flink.streaming.connectors.twitter;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.Serializable;
-import java.util.Objects;
-import java.util.Properties;
-
-import com.twitter.hbc.common.DelimitedStreamReader;
-import com.twitter.hbc.core.endpoint.StreamingEndpoint;
-import com.twitter.hbc.core.processor.HosebirdMessageProcessor;
 import org.apache.flink.api.common.functions.StoppableFunction;
 import org.apache.flink.api.java.ClosureCleaner;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.twitter.hbc.ClientBuilder;
+import com.twitter.hbc.common.DelimitedStreamReader;
 import com.twitter.hbc.core.Constants;
 import com.twitter.hbc.core.endpoint.StatusesSampleEndpoint;
+import com.twitter.hbc.core.endpoint.StreamingEndpoint;
+import com.twitter.hbc.core.processor.HosebirdMessageProcessor;
 import com.twitter.hbc.httpclient.BasicClient;
 import com.twitter.hbc.httpclient.auth.Authentication;
 import com.twitter.hbc.httpclient.auth.OAuth1;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Serializable;
+import java.util.Objects;
+import java.util.Properties;
 
 /**
  * Implementation of {@link SourceFunction} specialized to emit tweets from
@@ -81,10 +81,9 @@ public class TwitterSource extends RichSourceFunction<String> implements Stoppab
 	private transient Object waitLock;
 	private transient boolean running = true;
 
-
 	/**
-	 * Create {@link TwitterSource} for streaming
-	 * 
+	 * Create {@link TwitterSource} for streaming.
+	 *
 	 * @param properties For the source
 	 */
 	public TwitterSource(Properties properties) {
@@ -97,12 +96,11 @@ public class TwitterSource extends RichSourceFunction<String> implements Stoppab
 	}
 
 	private static void checkProperty(Properties p, String key) {
-		if(!p.containsKey(key)) {
+		if (!p.containsKey(key)) {
 			throw new IllegalArgumentException("Required property '" + key + "' not set.");
 		}
 	}
 
-
 	/**
 	 * Set a custom endpoint initializer.
 	 */
@@ -119,7 +117,6 @@ public class TwitterSource extends RichSourceFunction<String> implements Stoppab
 		waitLock = new Object();
 	}
 
-
 	@Override
 	public void run(final SourceContext<String> ctx) throws Exception {
 		LOG.info("Initializing Twitter Streaming API connection");
@@ -159,7 +156,7 @@ public class TwitterSource extends RichSourceFunction<String> implements Stoppab
 		LOG.info("Twitter Streaming API connection established successfully");
 
 		// just wait now
-		while(running) {
+		while (running) {
 			synchronized (waitLock) {
 				waitLock.wait(100L);
 			}
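
For context on the TwitterSource class touched above: the source is constructed from a Properties object, and checkProperty() rejects any missing required key. A minimal usage sketch follows (illustrative only; the credential values are placeholders, and the property keys assume the CONSUMER_KEY/CONSUMER_SECRET/TOKEN/TOKEN_SECRET constants defined on TwitterSource):

	import java.util.Properties;

	import org.apache.flink.streaming.api.datastream.DataStream;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.streaming.connectors.twitter.TwitterSource;

	public class TwitterSourceSketch {
		public static void main(String[] args) throws Exception {
			StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

			// OAuth credentials validated by checkProperty() in the constructor.
			Properties props = new Properties();
			props.setProperty(TwitterSource.CONSUMER_KEY, "<consumer key>");
			props.setProperty(TwitterSource.CONSUMER_SECRET, "<consumer secret>");
			props.setProperty(TwitterSource.TOKEN, "<access token>");
			props.setProperty(TwitterSource.TOKEN_SECRET, "<access token secret>");

			// Each element is one raw tweet emitted by the source as a JSON string.
			DataStream<String> tweets = env.addSource(new TwitterSource(props));
			tweets.print();

			env.execute("Twitter streaming sketch");
		}
	}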


[06/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-hcatalog

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-hcatalog


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/88189f2c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/88189f2c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/88189f2c

Branch: refs/heads/master
Commit: 88189f2c0c43036270f61bbc8df0cc4fb4d032e1
Parents: 43183ad
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:14:27 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:10 2017 +0200

----------------------------------------------------------------------
 flink-connectors/flink-hcatalog/pom.xml         |  4 +-
 .../flink/hcatalog/HCatInputFormatBase.java     | 39 ++++++++++----------
 .../flink/hcatalog/java/HCatInputFormat.java    | 33 ++++++++---------
 .../flink/hcatalog/scala/HCatInputFormat.scala  |  2 +-
 4 files changed, 39 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/88189f2c/flink-connectors/flink-hcatalog/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hcatalog/pom.xml b/flink-connectors/flink-hcatalog/pom.xml
index a9fbceb..10ca36d 100644
--- a/flink-connectors/flink-hcatalog/pom.xml
+++ b/flink-connectors/flink-hcatalog/pom.xml
@@ -19,9 +19,9 @@ under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-	
+
 	<modelVersion>4.0.0</modelVersion>
-	
+
 	<parent>
 		<groupId>org.apache.flink</groupId>
 		<artifactId>flink-connectors</artifactId>

http://git-wip-us.apache.org/repos/asf/flink/blob/88189f2c/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/HCatInputFormatBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/HCatInputFormatBase.java b/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/HCatInputFormatBase.java
index 859b706..26f2fed 100644
--- a/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/HCatInputFormatBase.java
+++ b/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/HCatInputFormatBase.java
@@ -18,8 +18,8 @@
 
 package org.apache.flink.hcatalog;
 
-import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
+import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.statistics.BaseStatistics;
 import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
 import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;
@@ -31,6 +31,7 @@ import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.api.java.typeutils.WritableTypeInfo;
 import org.apache.flink.core.io.InputSplitAssigner;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -57,9 +58,9 @@ import java.util.Map;
  * A InputFormat to read from HCatalog tables.
  * The InputFormat supports projection (selection and order of fields) and partition filters.
  *
- * Data can be returned as {@link org.apache.hive.hcatalog.data.HCatRecord} or Flink-native tuple.
+ * <p>Data can be returned as {@link org.apache.hive.hcatalog.data.HCatRecord} or Flink-native tuple.
  *
- * Note: Flink tuples might only support a limited number of fields (depending on the API).
+ * <p>Note: Flink tuples might only support a limited number of fields (depending on the API).
  *
  * @param <T>
  */
@@ -132,7 +133,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 
 		// build output schema
 		ArrayList<HCatFieldSchema> fieldSchemas = new ArrayList<HCatFieldSchema>(fields.length);
-		for(String field : fields) {
+		for (String field : fields) {
 			fieldSchemas.add(this.outputSchema.get(field));
 		}
 		this.outputSchema = new HCatSchema(fieldSchemas);
@@ -164,7 +165,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 	 * Specifies that the InputFormat returns Flink tuples instead of
 	 * {@link org.apache.hive.hcatalog.data.HCatRecord}.
 	 *
-	 * Note: Flink tuples might only support a limited number of fields (depending on the API).
+	 * <p>Note: Flink tuples might only support a limited number of fields (depending on the API).
 	 *
 	 * @return This InputFormat.
 	 * @throws org.apache.hive.hcatalog.common.HCatException
@@ -173,8 +174,8 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 
 		// build type information
 		int numFields = outputSchema.getFields().size();
-		if(numFields > this.getMaxFlinkTupleSize()) {
-			throw new IllegalArgumentException("Only up to "+this.getMaxFlinkTupleSize()+
+		if (numFields > this.getMaxFlinkTupleSize()) {
+			throw new IllegalArgumentException("Only up to " + this.getMaxFlinkTupleSize() +
 					" fields can be returned as Flink tuples.");
 		}
 
@@ -225,7 +226,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 			case STRUCT:
 				return new GenericTypeInfo(List.class);
 			default:
-				throw new IllegalArgumentException("Unknown data type \""+fieldSchema.getType()+"\" encountered.");
+				throw new IllegalArgumentException("Unknown data type \"" + fieldSchema.getType() + "\" encountered.");
 		}
 	}
 
@@ -283,7 +284,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 		}
 		HadoopInputSplit[] hadoopInputSplits = new HadoopInputSplit[splits.size()];
 
-		for(int i = 0; i < hadoopInputSplits.length; i++){
+		for (int i = 0; i < hadoopInputSplits.length; i++){
 			hadoopInputSplits[i] = new HadoopInputSplit(i, splits.get(i), jobContext);
 		}
 		return hadoopInputSplits;
@@ -299,7 +300,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 		TaskAttemptContext context = null;
 		try {
 			context = HadoopUtils.instantiateTaskAttemptContext(configuration, new TaskAttemptID());
-		} catch(Exception e) {
+		} catch (Exception e) {
 			throw new RuntimeException(e);
 		}
 
@@ -316,7 +317,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 
 	@Override
 	public boolean reachedEnd() throws IOException {
-		if(!this.fetched) {
+		if (!this.fetched) {
 			fetchNext();
 		}
 		return !this.hasNext;
@@ -334,11 +335,11 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 
 	@Override
 	public T nextRecord(T record) throws IOException {
-		if(!this.fetched) {
+		if (!this.fetched) {
 			// first record
 			fetchNext();
 		}
-		if(!this.hasNext) {
+		if (!this.hasNext) {
 			return null;
 		}
 		try {
@@ -347,13 +348,13 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 			HCatRecord v = this.recordReader.getCurrentValue();
 			this.fetched = false;
 
-			if(this.fieldNames.length > 0) {
+			if (this.fieldNames.length > 0) {
 				// return as Flink tuple
 				return this.buildFlinkTuple(record, v);
 
 			} else {
 				// return as HCatRecord
-				return (T)v;
+				return (T) v;
 			}
 
 		} catch (InterruptedException e) {
@@ -374,7 +375,7 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 
 	private void writeObject(ObjectOutputStream out) throws IOException {
 		out.writeInt(this.fieldNames.length);
-		for(String fieldName : this.fieldNames) {
+		for (String fieldName : this.fieldNames) {
 			out.writeUTF(fieldName);
 		}
 		this.configuration.write(out);
@@ -383,19 +384,19 @@ public abstract class HCatInputFormatBase<T> extends RichInputFormat<T, HadoopIn
 	@SuppressWarnings("unchecked")
 	private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
 		this.fieldNames = new String[in.readInt()];
-		for(int i=0; i<this.fieldNames.length; i++) {
+		for (int i = 0; i < this.fieldNames.length; i++) {
 			this.fieldNames[i] = in.readUTF();
 		}
 
 		Configuration configuration = new Configuration();
 		configuration.readFields(in);
 
-		if(this.configuration == null) {
+		if (this.configuration == null) {
 			this.configuration = configuration;
 		}
 
 		this.hCatInputFormat = new org.apache.hive.hcatalog.mapreduce.HCatInputFormat();
-		this.outputSchema = (HCatSchema)HCatUtil.deserialize(this.configuration.get("mapreduce.lib.hcat.output.schema"));
+		this.outputSchema = (HCatSchema) HCatUtil.deserialize(this.configuration.get("mapreduce.lib.hcat.output.schema"));
 	}
 
 	// --------------------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/flink/blob/88189f2c/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/java/HCatInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/java/HCatInputFormat.java b/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/java/HCatInputFormat.java
index 46f3cd5..2520b34 100644
--- a/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/java/HCatInputFormat.java
+++ b/flink-connectors/flink-hcatalog/src/main/java/org/apache/flink/hcatalog/java/HCatInputFormat.java
@@ -18,9 +18,9 @@
 
 package org.apache.flink.hcatalog.java;
 
-
 import org.apache.flink.api.java.tuple.Tuple;
 import org.apache.flink.hcatalog.HCatInputFormatBase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.data.HCatRecord;
@@ -29,7 +29,7 @@ import org.apache.hive.hcatalog.data.HCatRecord;
  * A InputFormat to read from HCatalog tables.
  * The InputFormat supports projection (selection and order of fields) and partition filters.
  *
- * Data can be returned as {@link HCatRecord} or Flink {@link org.apache.flink.api.java.tuple.Tuple}.
+ * <p>Data can be returned as {@link HCatRecord} or Flink {@link org.apache.flink.api.java.tuple.Tuple}.
  * Flink tuples support only up to 25 fields.
  *
  * @param <T>
@@ -47,7 +47,6 @@ public class HCatInputFormat<T> extends HCatInputFormatBase<T> {
 		super(database, table, config);
 	}
 
-
 	@Override
 	protected int getMaxFlinkTupleSize() {
 		return 25;
@@ -56,10 +55,10 @@ public class HCatInputFormat<T> extends HCatInputFormatBase<T> {
 	@Override
 	protected T buildFlinkTuple(T t, HCatRecord record) throws HCatException {
 
-		Tuple tuple = (Tuple)t;
+		Tuple tuple = (Tuple) t;
 
 		// Extract all fields from HCatRecord
-		for(int i=0; i < this.fieldNames.length; i++) {
+		for (int i = 0; i < this.fieldNames.length; i++) {
 
 			// get field value
 			Object o = record.get(this.fieldNames[i], this.outputSchema);
@@ -69,49 +68,49 @@ public class HCatInputFormat<T> extends HCatInputFormatBase<T> {
 			//   need to be converted to original type.
 			switch(this.outputSchema.get(i).getType()) {
 				case INT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Integer.parseInt((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case TINYINT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Byte.parseByte((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case SMALLINT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Short.parseShort((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case BIGINT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Long.parseLong((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case BOOLEAN:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Boolean.parseBoolean((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case FLOAT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Float.parseFloat((String) o), i);
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case DOUBLE:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						tuple.setField(Double.parseDouble((String) o), i);
 					} else {
 						tuple.setField(o, i);
@@ -121,28 +120,28 @@ public class HCatInputFormat<T> extends HCatInputFormatBase<T> {
 					tuple.setField(o, i);
 					break;
 				case BINARY:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						throw new RuntimeException("Cannot handle partition keys of type BINARY.");
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case ARRAY:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						throw new RuntimeException("Cannot handle partition keys of type ARRAY.");
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case MAP:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						throw new RuntimeException("Cannot handle partition keys of type MAP.");
 					} else {
 						tuple.setField(o, i);
 					}
 					break;
 				case STRUCT:
-					if(o instanceof String) {
+					if (o instanceof String) {
 						throw new RuntimeException("Cannot handle partition keys of type STRUCT.");
 					} else {
 						tuple.setField(o, i);
@@ -153,7 +152,7 @@ public class HCatInputFormat<T> extends HCatInputFormatBase<T> {
 			}
 		}
 
-		return (T)tuple;
+		return (T) tuple;
 
 	}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/88189f2c/flink-connectors/flink-hcatalog/src/main/scala/org/apache/flink/hcatalog/scala/HCatInputFormat.scala
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hcatalog/src/main/scala/org/apache/flink/hcatalog/scala/HCatInputFormat.scala b/flink-connectors/flink-hcatalog/src/main/scala/org/apache/flink/hcatalog/scala/HCatInputFormat.scala
index 0299ee1..6491815 100644
--- a/flink-connectors/flink-hcatalog/src/main/scala/org/apache/flink/hcatalog/scala/HCatInputFormat.scala
+++ b/flink-connectors/flink-hcatalog/src/main/scala/org/apache/flink/hcatalog/scala/HCatInputFormat.scala
@@ -36,7 +36,7 @@ class HCatInputFormat[T](
                         database: String,
                         table: String,
                         config: Configuration
-                          ) extends HCatInputFormatBase[T](database, table, config) {
+                         ) extends HCatInputFormatBase[T](database, table, config) {
 
   def this(database: String, table: String) {
     this(database, table, new Configuration)


[04/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-jdbc

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-jdbc


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/23920bb8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/23920bb8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/23920bb8

Branch: refs/heads/master
Commit: 23920bb88eed1f6d1bfe55fb65b5013a784930ba
Parents: d4f7339
Author: zentol <ch...@apache.org>
Authored: Wed May 24 22:55:11 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:08 2017 +0200

----------------------------------------------------------------------
 flink-connectors/flink-jdbc/pom.xml             |  6 +-
 .../flink/api/java/io/jdbc/JDBCInputFormat.java | 48 ++++++-----
 .../api/java/io/jdbc/JDBCOutputFormat.java      | 85 ++++++++++----------
 .../split/GenericParameterValuesProvider.java   | 13 ++-
 .../split/NumericBetweenParametersProvider.java | 24 +++---
 .../io/jdbc/split/ParameterValuesProvider.java  | 17 ++--
 .../flink/api/java/io/jdbc/JDBCFullTest.java    | 22 ++---
 .../api/java/io/jdbc/JDBCInputFormatTest.java   | 34 ++++----
 .../api/java/io/jdbc/JDBCOutputFormatTest.java  | 15 ++--
 .../flink/api/java/io/jdbc/JDBCTestBase.java    | 34 ++++----
 .../src/test/resources/log4j-test.properties    |  2 +-
 11 files changed, 159 insertions(+), 141 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/pom.xml b/flink-connectors/flink-jdbc/pom.xml
index a2bbaf4..0704dc8 100644
--- a/flink-connectors/flink-jdbc/pom.xml
+++ b/flink-connectors/flink-jdbc/pom.xml
@@ -18,11 +18,11 @@ specific language governing permissions and limitations
 under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	
+
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-	
+
 	<modelVersion>4.0.0</modelVersion>
-	
+
 	<parent>
 		<groupId>org.apache.flink</groupId>
 		<artifactId>flink-connectors</artifactId>

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
index e714867..835fb23 100644
--- a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
+++ b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
@@ -18,38 +18,39 @@
 
 package org.apache.flink.api.java.io.jdbc;
 
-import java.io.IOException;
-import java.math.BigDecimal;
-import java.sql.Array;
-import java.sql.Connection;
-import java.sql.Date;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.Arrays;
-
 import org.apache.flink.api.common.io.DefaultInputSplitAssigner;
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.statistics.BaseStatistics;
 import org.apache.flink.api.java.io.jdbc.split.ParameterValuesProvider;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
-import org.apache.flink.types.Row;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.io.GenericInputSplit;
 import org.apache.flink.core.io.InputSplit;
 import org.apache.flink.core.io.InputSplitAssigner;
+import org.apache.flink.types.Row;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.sql.Array;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Arrays;
+
 /**
  * InputFormat to read data from a database and generate Rows.
  * The InputFormat has to be configured using the supplied InputFormatBuilder.
- * A valid RowTypeInfo must be properly configured in the builder, e.g.: </br>
+ * A valid RowTypeInfo must be properly configured in the builder, e.g.:
  *
  * <pre><code>
  * TypeInformation<?>[] fieldTypes = new TypeInformation<?>[] {
@@ -70,10 +71,10 @@ import org.slf4j.LoggerFactory;
  *				.finish();
  * </code></pre>
  *
- * In order to query the JDBC source in parallel, you need to provide a
+ * <p>In order to query the JDBC source in parallel, you need to provide a
  * parameterized query template (i.e. a valid {@link PreparedStatement}) and
  * a {@link ParameterValuesProvider} which provides binding values for the
- * query parameters. E.g.:</br>
+ * query parameters. E.g.:
  *
  * <pre><code>
  *
@@ -151,7 +152,7 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 	public void closeInputFormat() {
 		//called once per inputFormat (on close)
 		try {
-			if(statement != null) {
+			if (statement != null) {
 				statement.close();
 			}
 		} catch (SQLException se) {
@@ -161,7 +162,7 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 		}
 
 		try {
-			if(dbConn != null) {
+			if (dbConn != null) {
 				dbConn.close();
 			}
 		} catch (SQLException se) {
@@ -221,7 +222,7 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 						statement.setArray(i + 1, (Array) param);
 					} else {
 						//extends with other types if needed
-						throw new IllegalArgumentException("open() failed. Parameter " + i + " of type " + param.getClass() + " is not handled (yet)." );
+						throw new IllegalArgumentException("open() failed. Parameter " + i + " of type " + param.getClass() + " is not handled (yet).");
 					}
 				}
 				if (LOG.isDebugEnabled()) {
@@ -242,7 +243,7 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 	 */
 	@Override
 	public void close() throws IOException {
-		if(resultSet == null) {
+		if (resultSet == null) {
 			return;
 		}
 		try {
@@ -264,7 +265,7 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 	}
 
 	/**
-	 * Stores the next resultSet row in a tuple
+	 * Stores the next resultSet row in a tuple.
 	 *
 	 * @param row row to be reused.
 	 * @return row containing next {@link Row}
@@ -319,6 +320,9 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 		return new JDBCInputFormatBuilder();
 	}
 
+	/**
+	 * Builder for a {@link JDBCInputFormat}.
+	 */
 	public static class JDBCInputFormatBuilder {
 		private final JDBCInputFormat format;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
index c5585e2..3f2ad33 100644
--- a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
+++ b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
@@ -18,52 +18,53 @@
 
 package org.apache.flink.api.java.io.jdbc;
 
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-
 import org.apache.flink.api.common.io.RichOutputFormat;
 import org.apache.flink.api.java.tuple.Tuple;
-import org.apache.flink.types.Row;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.types.Row;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
 /**
  * OutputFormat to write tuples into a database.
  * The OutputFormat has to be configured using the supplied OutputFormatBuilder.
- * 
+ *
  * @see Tuple
  * @see DriverManager
  */
 public class JDBCOutputFormat extends RichOutputFormat<Row> {
 	private static final long serialVersionUID = 1L;
-	
+
 	private static final Logger LOG = LoggerFactory.getLogger(JDBCOutputFormat.class);
-	
+
 	private String username;
 	private String password;
 	private String drivername;
 	private String dbURL;
 	private String query;
 	private int batchInterval = 5000;
-	
+
 	private Connection dbConn;
 	private PreparedStatement upload;
-	
+
 	private int batchCount = 0;
-	
+
 	public int[] typesArray;
-	
+
 	public JDBCOutputFormat() {
 	}
-	
+
 	@Override
 	public void configure(Configuration parameters) {
 	}
-	
+
 	/**
 	 * Connects to the target database and initializes the prepared statement.
 	 *
@@ -82,7 +83,7 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			throw new IllegalArgumentException("JDBC driver class not found.", cnfe);
 		}
 	}
-	
+
 	private void establishConnection() throws SQLException, ClassNotFoundException {
 		Class.forName(drivername);
 		if (username == null) {
@@ -91,14 +92,13 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			dbConn = DriverManager.getConnection(dbURL, username, password);
 		}
 	}
-	
+
 	/**
 	 * Adds a record to the prepared statement.
-	 * <p>
-	 * When this method is called, the output format is guaranteed to be opened.
-	 * </p>
-	 * 
-	 * WARNING: this may fail when no column types specified (because a best effort approach is attempted in order to
+	 *
+	 * <p>When this method is called, the output format is guaranteed to be opened.
+	 *
+	 * <p>WARNING: this may fail when no column types specified (because a best effort approach is attempted in order to
 	 * insert a null value but it's not guaranteed that the JDBC driver handles PreparedStatement.setObject(pos, null))
 	 *
 	 * @param row The records to add to the output.
@@ -110,10 +110,10 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 
 		if (typesArray != null && typesArray.length > 0 && typesArray.length != row.getArity()) {
 			LOG.warn("Column SQL types array doesn't match arity of passed Row! Check the passed array...");
-		} 
+		}
 		try {
 
-			if (typesArray == null ) {
+			if (typesArray == null) {
 				// no types provided
 				for (int index = 0; index < row.getArity(); index++) {
 					LOG.warn("Unknown column type for column %s. Best effort approach to set its value: %s.", index + 1, row.getField(index));
@@ -209,7 +209,7 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			throw new IllegalArgumentException("writeRecord() failed", e);
 		}
 	}
-	
+
 	/**
 	 * Executes prepared statement and closes all resources of this instance.
 	 *
@@ -228,7 +228,7 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			upload = null;
 			batchCount = 0;
 		}
-		
+
 		try {
 			if (dbConn != null) {
 				dbConn.close();
@@ -239,56 +239,59 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			dbConn = null;
 		}
 	}
-	
+
 	public static JDBCOutputFormatBuilder buildJDBCOutputFormat() {
 		return new JDBCOutputFormatBuilder();
 	}
-	
+
+	/**
+	 * Builder for a {@link JDBCOutputFormat}.
+	 */
 	public static class JDBCOutputFormatBuilder {
 		private final JDBCOutputFormat format;
-		
+
 		protected JDBCOutputFormatBuilder() {
 			this.format = new JDBCOutputFormat();
 		}
-		
+
 		public JDBCOutputFormatBuilder setUsername(String username) {
 			format.username = username;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setPassword(String password) {
 			format.password = password;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setDrivername(String drivername) {
 			format.drivername = drivername;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setDBUrl(String dbURL) {
 			format.dbURL = dbURL;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setQuery(String query) {
 			format.query = query;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setBatchInterval(int batchInterval) {
 			format.batchInterval = batchInterval;
 			return this;
 		}
-		
+
 		public JDBCOutputFormatBuilder setSqlTypes(int[] typesArray) {
 			format.typesArray = typesArray;
 			return this;
 		}
-		
+
 		/**
 		 * Finalizes the configuration and checks validity.
-		 * 
+		 *
 		 * @return Configured JDBCOutputFormat
 		 */
 		public JDBCOutputFormat finish() {
@@ -307,9 +310,9 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 			if (format.drivername == null) {
 				throw new IllegalArgumentException("No driver supplied");
 			}
-			
+
 			return format;
 		}
 	}
-	
+
 }
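
The whitespace-only changes above leave the JDBCOutputFormat builder API untouched; for reference, a condensed usage sketch mirroring the JDBCFullTest changed later in this commit (driver, URL, and target table are the test's Derby placeholders, and 'books' stands for an existing DataSet<Row> with matching fields):

	import java.sql.Types;

	import org.apache.flink.api.java.io.jdbc.JDBCOutputFormat;

	// 'books' is assumed to be a DataSet<Row> with (id, title, author, price, qty) fields.
	books.output(JDBCOutputFormat.buildJDBCOutputFormat()
			.setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
			.setDBUrl("jdbc:derby:memory:ebookshop")
			.setQuery("insert into newbooks (id, title, author, price, qty) values (?,?,?,?,?)")
			// declaring SQL types avoids the best-effort setObject() path writeRecord() warns about
			.setSqlTypes(new int[]{Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.DOUBLE, Types.INTEGER})
			.finish());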

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/GenericParameterValuesProvider.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/GenericParameterValuesProvider.java b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/GenericParameterValuesProvider.java
index 2ed2f8c..c43e754 100644
--- a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/GenericParameterValuesProvider.java
+++ b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/GenericParameterValuesProvider.java
@@ -15,22 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.api.java.io.jdbc.split;
 
-import java.io.Serializable;
+package org.apache.flink.api.java.io.jdbc.split;
 
 import org.apache.flink.api.java.io.jdbc.JDBCInputFormat;
 
-/** 
- * 
+import java.io.Serializable;
+
+/**
  * This splits generator actually does nothing but wrapping the query parameters
  * computed by the user before creating the {@link JDBCInputFormat} instance.
- * 
- * */
+ */
 public class GenericParameterValuesProvider implements ParameterValuesProvider {
 
 	private final Serializable[][] parameters;
-	
+
 	public GenericParameterValuesProvider(Serializable[][] parameters) {
 		this.parameters = parameters;
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/NumericBetweenParametersProvider.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/NumericBetweenParametersProvider.java b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/NumericBetweenParametersProvider.java
index 4420172..4b8ecd6 100644
--- a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/NumericBetweenParametersProvider.java
+++ b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/NumericBetweenParametersProvider.java
@@ -15,36 +15,36 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.api.java.io.jdbc.split;
 
-import static org.apache.flink.util.Preconditions.checkArgument;
+package org.apache.flink.api.java.io.jdbc.split;
 
 import java.io.Serializable;
 
-/** 
- * 
+import static org.apache.flink.util.Preconditions.checkArgument;
+
+/**
  * This query parameters generator is an helper class to parameterize from/to queries on a numeric column.
  * The generated array of from/to values will be equally sized to fetchSize (apart from the last one),
  * ranging from minVal up to maxVal.
- * 
- * For example, if there's a table <CODE>BOOKS</CODE> with a numeric PK <CODE>id</CODE>, using a query like:
+ *
+ * <p>For example, if there's a table <CODE>BOOKS</CODE> with a numeric PK <CODE>id</CODE>, using a query like:
  * <PRE>
  *   SELECT * FROM BOOKS WHERE id BETWEEN ? AND ?
  * </PRE>
  *
- * you can take advantage of this class to automatically generate the parameters of the BETWEEN clause,
+ * <p>You can take advantage of this class to automatically generate the parameters of the BETWEEN clause,
  * based on the passed constructor parameters.
- * 
- * */
+ *
+ */
 public class NumericBetweenParametersProvider implements ParameterValuesProvider {
 
 	private final long fetchSize;
 	private final long minVal;
 	private final long maxVal;
-	
+
 	/**
 	 * NumericBetweenParametersProvider constructor.
-	 * 
+	 *
 	 * @param fetchSize the max distance between the produced from/to pairs
 	 * @param minVal the lower bound of the produced "from" values
 	 * @param maxVal the upper bound of the produced "to" values
@@ -72,5 +72,5 @@ public class NumericBetweenParametersProvider implements ParameterValuesProvider
 		}
 		return parameters;
 	}
-	
+
 }
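
To make the from/to generation described in the Javadoc concrete, a small sketch of the parameter matrix this provider hands to the JDBCInputFormat (the key range is made up for illustration):

	import java.io.Serializable;

	import org.apache.flink.api.java.io.jdbc.split.NumericBetweenParametersProvider;

	// Splits the id range [0, 4999] into BETWEEN bounds of at most 1000 ids each.
	Serializable[][] params = new NumericBetweenParametersProvider(1000, 0, 4999).getParameterValues();
	// One {from, to} pair per split: {0, 999}, {1000, 1999}, ..., {4000, 4999}.
	// Each pair binds the two '?' placeholders of
	//   SELECT * FROM BOOKS WHERE id BETWEEN ? AND ?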

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/ParameterValuesProvider.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/ParameterValuesProvider.java b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/ParameterValuesProvider.java
index c194497..f31c6e1 100644
--- a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/ParameterValuesProvider.java
+++ b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/split/ParameterValuesProvider.java
@@ -15,21 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.api.java.io.jdbc.split;
 
-import java.io.Serializable;
+package org.apache.flink.api.java.io.jdbc.split;
 
 import org.apache.flink.api.java.io.jdbc.JDBCInputFormat;
 
+import java.io.Serializable;
+
 /**
- * 
  * This interface is used by the {@link JDBCInputFormat} to compute the list of parallel query to run (i.e. splits).
- * Each query will be parameterized using a row of the matrix provided by each {@link ParameterValuesProvider} implementation
- * 
- * */
+ * Each query will be parameterized using a row of the matrix provided by each {@link ParameterValuesProvider}
+ * implementation.
+ */
 public interface ParameterValuesProvider {
 
-	/** Returns the necessary parameters array to use for query in parallel a table */
-	public Serializable[][] getParameterValues();
-	
+	/** Returns the necessary parameters array to use for query in parallel a table. */
+	Serializable[][] getParameterValues();
 }
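
Each row of the matrix returned by getParameterValues() parameterizes one split. The GenericParameterValuesProvider from this same commit simply wraps a matrix computed up front; a short sketch in the style of the JDBCInputFormatTest further down (the author names come from the test data):

	import java.io.Serializable;

	import org.apache.flink.api.java.io.jdbc.split.GenericParameterValuesProvider;
	import org.apache.flink.api.java.io.jdbc.split.ParameterValuesProvider;

	// One row per split; each row binds the single '?' of
	//   SELECT * FROM books WHERE author = ?
	Serializable[][] queryParameters = new String[2][1];
	queryParameters[0] = new String[]{"Tan Ah Teck"};
	queryParameters[1] = new String[]{"Kevin Jones"};
	ParameterValuesProvider paramProvider = new GenericParameterValuesProvider(queryParameters);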

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
index 78cf69c..bd575c3 100644
--- a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
+++ b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
@@ -18,20 +18,24 @@
 
 package org.apache.flink.api.java.io.jdbc;
 
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.Types;
-
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.io.jdbc.JDBCInputFormat.JDBCInputFormatBuilder;
 import org.apache.flink.api.java.io.jdbc.split.NumericBetweenParametersProvider;
 import org.apache.flink.types.Row;
+
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Types;
+
+/**
+ * Tests using both {@link JDBCInputFormat} and {@link JDBCOutputFormat}.
+ */
 public class JDBCFullTest extends JDBCTestBase {
 
 	@Test
@@ -50,7 +54,7 @@ public class JDBCFullTest extends JDBCTestBase {
 				.setDrivername(JDBCTestBase.DRIVER_CLASS)
 				.setDBUrl(JDBCTestBase.DB_URL)
 				.setQuery(JDBCTestBase.SELECT_ALL_BOOKS)
-				.setRowTypeInfo(rowTypeInfo);
+				.setRowTypeInfo(ROW_TYPE_INFO);
 
 		if (exploitParallelism) {
 			final int fetchSize = 1;
@@ -69,8 +73,8 @@ public class JDBCFullTest extends JDBCTestBase {
 		source.output(JDBCOutputFormat.buildJDBCOutputFormat()
 				.setDrivername(JDBCTestBase.DRIVER_CLASS)
 				.setDBUrl(JDBCTestBase.DB_URL)
-				.setQuery("insert into newbooks (id,title,author,price,qty) values (?,?,?,?,?)")
-				.setSqlTypes(new int[]{Types.INTEGER, Types.VARCHAR, Types.VARCHAR,Types.DOUBLE,Types.INTEGER})
+				.setQuery("insert into newbooks (id, title, author, price, qty) values (?,?,?,?,?)")
+				.setSqlTypes(new int[]{Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.DOUBLE, Types.INTEGER})
 				.finish());
 
 		environment.execute();

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
index 3f6a87a..b1416ea 100644
--- a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
+++ b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
@@ -23,6 +23,7 @@ import org.apache.flink.api.java.io.jdbc.split.NumericBetweenParametersProvider;
 import org.apache.flink.api.java.io.jdbc.split.ParameterValuesProvider;
 import org.apache.flink.core.io.InputSplit;
 import org.apache.flink.types.Row;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
@@ -31,6 +32,9 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.sql.ResultSet;
 
+/**
+ * Tests for the {@link JDBCInputFormat}.
+ */
 public class JDBCInputFormatTest extends JDBCTestBase {
 
 	private JDBCInputFormat jdbcInputFormat;
@@ -60,7 +64,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername("org.apache.derby.jdbc.idontexist")
 				.setDBUrl(DB_URL)
 				.setQuery(SELECT_ALL_BOOKS)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.finish();
 		jdbcInputFormat.openInputFormat();
 	}
@@ -71,7 +75,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl("jdbc:der:iamanerror:mory:ebookshop")
 				.setQuery(SELECT_ALL_BOOKS)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.finish();
 		jdbcInputFormat.openInputFormat();
 	}
@@ -82,7 +86,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery("iamnotsql")
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.finish();
 		jdbcInputFormat.openInputFormat();
 	}
@@ -92,7 +96,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 		jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
 				.setDrivername(DRIVER_CLASS)
 				.setQuery(SELECT_ALL_BOOKS)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.finish();
 	}
 
@@ -102,7 +106,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery(SELECT_ALL_BOOKS)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
 				.finish();
 		//this query does not exploit parallelism
@@ -122,7 +126,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 		jdbcInputFormat.closeInputFormat();
 		Assert.assertEquals(TEST_DATA.length, recordCount);
 	}
-	
+
 	@Test
 	public void testJDBCInputFormatWithParallelismAndNumericColumnSplitting() throws IOException {
 		final int fetchSize = 1;
@@ -133,7 +137,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery(JDBCTestBase.SELECT_ALL_BOOKS_SPLIT_BY_ID)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.setParametersProvider(pramProvider)
 				.setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
 				.finish();
@@ -163,13 +167,13 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 	public void testJDBCInputFormatWithoutParallelismAndNumericColumnSplitting() throws IOException {
 		final long min = TEST_DATA[0].id;
 		final long max = TEST_DATA[TEST_DATA.length - 1].id;
-		final long fetchSize = max + 1;//generate a single split
+		final long fetchSize = max + 1; //generate a single split
 		ParameterValuesProvider pramProvider = new NumericBetweenParametersProvider(fetchSize, min, max);
 		jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery(JDBCTestBase.SELECT_ALL_BOOKS_SPLIT_BY_ID)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.setParametersProvider(pramProvider)
 				.setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
 				.finish();
@@ -194,7 +198,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 		jdbcInputFormat.closeInputFormat();
 		Assert.assertEquals(TEST_DATA.length, recordCount);
 	}
-	
+
 	@Test
 	public void testJDBCInputFormatWithParallelismAndGenericSplitting() throws IOException {
 		Serializable[][] queryParameters = new String[2][1];
@@ -205,7 +209,7 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery(JDBCTestBase.SELECT_ALL_BOOKS_SPLIT_BY_AUTHOR)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.setParametersProvider(paramProvider)
 				.setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
 				.finish();
@@ -231,21 +235,21 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 
 			int id = ((int) row.getField(0));
 			int testDataIndex = id - 1001;
-			
+
 			assertEquals(TEST_DATA[testDataIndex], row);
 			sum += id;
 		}
-		
+
 		Assert.assertEquals(expectedIDSum, sum);
 	}
-	
+
 	@Test
 	public void testEmptyResults() throws IOException {
 		jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
 				.setDrivername(DRIVER_CLASS)
 				.setDBUrl(DB_URL)
 				.setQuery(SELECT_EMPTY)
-				.setRowTypeInfo(rowTypeInfo)
+				.setRowTypeInfo(ROW_TYPE_INFO)
 				.setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
 				.finish();
 		try {

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormatTest.java b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormatTest.java
index a67c1ce..3f14504 100644
--- a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormatTest.java
+++ b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormatTest.java
@@ -18,6 +18,12 @@
 
 package org.apache.flink.api.java.io.jdbc;
 
+import org.apache.flink.types.Row;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -25,11 +31,9 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 
-import org.apache.flink.types.Row;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-
+/**
+ * Tests for the {@link JDBCOutputFormat}.
+ */
 public class JDBCOutputFormatTest extends JDBCTestBase {
 
 	private JDBCOutputFormat jdbcOutputFormat;
@@ -80,7 +84,6 @@ public class JDBCOutputFormatTest extends JDBCTestBase {
 				.finish();
 	}
 
-
 	@Test(expected = IllegalArgumentException.class)
 	public void testIncompatibleTypes() throws IOException {
 		jdbcOutputFormat = JDBCOutputFormat.buildJDBCOutputFormat()

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
index 13da4c7..7189393 100644
--- a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
+++ b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
@@ -15,24 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.api.java.io.jdbc;
 
+import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
+import org.apache.flink.api.java.typeutils.RowTypeInfo;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
 import java.io.OutputStream;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.sql.Statement;
 
-import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
-import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
 /**
- * Base test class for JDBC Input and Output formats
+ * Base test class for JDBC Input and Output formats.
  */
 public class JDBCTestBase {
-	
+
 	public static final String DRIVER_CLASS = "org.apache.derby.jdbc.EmbeddedDriver";
 	public static final String DB_URL = "jdbc:derby:memory:ebookshop";
 	public static final String INPUT_TABLE = "books";
@@ -43,7 +45,7 @@ public class JDBCTestBase {
 	public static final String INSERT_TEMPLATE = "insert into %s (id, title, author, price, qty) values (?,?,?,?,?)";
 	public static final String SELECT_ALL_BOOKS_SPLIT_BY_ID = SELECT_ALL_BOOKS + " WHERE id BETWEEN ? AND ?";
 	public static final String SELECT_ALL_BOOKS_SPLIT_BY_AUTHOR = SELECT_ALL_BOOKS + " WHERE author = ?";
-	
+
 	public static final TestEntry[] TEST_DATA = {
 			new TestEntry(1001, ("Java public for dummies"), ("Tan Ah Teck"), 11.11, 11),
 			new TestEntry(1002, ("More Java for dummies"), ("Tan Ah Teck"), 22.22, 22),
@@ -57,13 +59,13 @@ public class JDBCTestBase {
 			new TestEntry(1010, ("A Teaspoon of Java 1.8"), ("Kevin Jones"), null, 1010)
 	};
 
-	protected static class TestEntry {
+	static class TestEntry {
 		protected final Integer id;
 		protected final String title;
 		protected final String author;
 		protected final Double price;
 		protected final Integer qty;
-		
+
 		private TestEntry(Integer id, String title, String author, Double price, Integer qty) {
 			this.id = id;
 			this.title = title;
@@ -73,7 +75,7 @@ public class JDBCTestBase {
 		}
 	}
 
-	public static final RowTypeInfo rowTypeInfo = new RowTypeInfo(
+	public static final RowTypeInfo ROW_TYPE_INFO = new RowTypeInfo(
 		BasicTypeInfo.INT_TYPE_INFO,
 		BasicTypeInfo.STRING_TYPE_INFO,
 		BasicTypeInfo.STRING_TYPE_INFO,
@@ -91,7 +93,7 @@ public class JDBCTestBase {
 		sqlQueryBuilder.append("PRIMARY KEY (id))");
 		return sqlQueryBuilder.toString();
 	}
-	
+
 	public static String getInsertQuery() {
 		StringBuilder sqlQueryBuilder = new StringBuilder("INSERT INTO books (id, title, author, price, qty) VALUES ");
 		for (int i = 0; i < TEST_DATA.length; i++) {
@@ -108,7 +110,7 @@ public class JDBCTestBase {
 		String insertQuery = sqlQueryBuilder.toString();
 		return insertQuery;
 	}
-	
+
 	public static final OutputStream DEV_NULL = new OutputStream() {
 		@Override
 		public void write(int b) {
@@ -126,13 +128,13 @@ public class JDBCTestBase {
 			insertDataIntoInputTable(conn);
 		}
 	}
-	
+
 	private static void createTable(Connection conn, String tableName) throws SQLException {
 		Statement stat = conn.createStatement();
 		stat.executeUpdate(getCreateQuery(tableName));
 		stat.close();
 	}
-	
+
 	private static void insertDataIntoInputTable(Connection conn) throws SQLException {
 		Statement stat = conn.createStatement();
 		stat.execute(getInsertQuery());
@@ -147,7 +149,7 @@ public class JDBCTestBase {
 			Statement stat = conn.createStatement()) {
 
 			stat.executeUpdate("DROP TABLE " + INPUT_TABLE);
-			stat.executeUpdate("DROP TABLE " + OUTPUT_TABLE);	
+			stat.executeUpdate("DROP TABLE " + OUTPUT_TABLE);
 		}
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/23920bb8/flink-connectors/flink-jdbc/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-jdbc/src/test/resources/log4j-test.properties b/flink-connectors/flink-jdbc/src/test/resources/log4j-test.properties
index 2fb9345..c977d4c 100644
--- a/flink-connectors/flink-jdbc/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-jdbc/src/test/resources/log4j-test.properties
@@ -16,4 +16,4 @@
 # limitations under the License.
 ################################################################################
 
-log4j.rootLogger=OFF
\ No newline at end of file
+log4j.rootLogger=OFF


[14/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kinesis

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-kinesis


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b12de1ed
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b12de1ed
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b12de1ed

Branch: refs/heads/master
Commit: b12de1ed546fc902922797ef647af97f763a903a
Parents: 28e8043
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:56:16 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:19 2017 +0200

----------------------------------------------------------------------
 .../kinesis/FlinkKinesisConsumer.java           | 15 ++---
 .../kinesis/FlinkKinesisProducer.java           | 31 +++++-----
 .../connectors/kinesis/KinesisPartitioner.java  | 11 +++-
 .../kinesis/config/AWSConfigConstants.java      | 26 ++++-----
 .../kinesis/config/ConsumerConfigConstants.java | 45 +++++++--------
 .../kinesis/config/ProducerConfigConstants.java |  2 +-
 .../kinesis/examples/ConsumeFromKinesis.java    |  3 +-
 .../kinesis/examples/ProduceIntoKinesis.java    | 11 +++-
 .../kinesis/internals/KinesisDataFetcher.java   | 48 ++++++++--------
 .../kinesis/internals/ShardConsumer.java        | 23 ++++----
 .../kinesis/model/KinesisStreamShard.java       | 10 ++--
 .../kinesis/model/KinesisStreamShardState.java  |  3 +-
 .../kinesis/model/SentinelSequenceNumber.java   | 16 +++---
 .../kinesis/model/StreamShardHandle.java        |  8 ++-
 .../kinesis/proxy/GetShardListResult.java       |  2 +-
 .../connectors/kinesis/proxy/KinesisProxy.java  | 56 ++++++++++---------
 .../kinesis/proxy/KinesisProxyInterface.java    |  7 ++-
 .../KinesisDeserializationSchema.java           |  2 +-
 .../KinesisDeserializationSchemaWrapper.java    |  2 +-
 .../KinesisSerializationSchema.java             |  4 +-
 .../connectors/kinesis/util/AWSUtil.java        |  9 +--
 .../kinesis/util/KinesisConfigUtil.java         |  5 +-
 .../FlinkKinesisConsumerMigrationTest.java      | 18 +++---
 .../kinesis/FlinkKinesisConsumerTest.java       | 19 ++++---
 .../internals/KinesisDataFetcherTest.java       | 59 +++++++++++---------
 .../kinesis/internals/ShardConsumerTest.java    | 12 ++--
 .../manualtests/ManualConsumerProducerTest.java | 16 +++---
 .../manualtests/ManualExactlyOnceTest.java      | 14 +++--
 ...nualExactlyOnceWithStreamReshardingTest.java | 23 ++++----
 .../kinesis/manualtests/ManualProducerTest.java |  9 +--
 .../kinesis/proxy/KinesisProxyTest.java         |  9 ++-
 .../ExactlyOnceValidatingConsumerThread.java    | 15 ++---
 .../testutils/FakeKinesisBehavioursFactory.java | 37 ++++++------
 .../KinesisEventsGeneratorProducerThread.java   |  7 ++-
 .../testutils/KinesisShardIdGenerator.java      |  6 +-
 .../testutils/TestableFlinkKinesisConsumer.java |  4 ++
 .../testutils/TestableKinesisDataFetcher.java   | 21 ++++---
 37 files changed, 330 insertions(+), 278 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
index ea76ccc..d127f2b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
@@ -44,13 +44,14 @@ import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeseri
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
 import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Map;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 
 import static org.apache.flink.util.Preconditions.checkArgument;
@@ -82,24 +83,24 @@ public class FlinkKinesisConsumer<T> extends RichParallelSourceFunction<T> imple
 	//  Consumer properties
 	// ------------------------------------------------------------------------
 
-	/** The names of the Kinesis streams that we will be consuming from */
+	/** The names of the Kinesis streams that we will be consuming from. */
 	private final List<String> streams;
 
 	/** Properties to parametrize settings such as AWS service region, initial position in stream,
-	 * shard list retrieval behaviours, etc */
+	 * shard list retrieval behaviours, etc. */
 	private final Properties configProps;
 
-	/** User supplied deserialization schema to convert Kinesis byte messages to Flink objects */
+	/** User supplied deserialization schema to convert Kinesis byte messages to Flink objects. */
 	private final KinesisDeserializationSchema<T> deserializer;
 
 	// ------------------------------------------------------------------------
 	//  Runtime state
 	// ------------------------------------------------------------------------
 
-	/** Per-task fetcher for Kinesis data records, where each fetcher pulls data from one or more Kinesis shards */
+	/** Per-task fetcher for Kinesis data records, where each fetcher pulls data from one or more Kinesis shards. */
 	private transient KinesisDataFetcher<T> fetcher;
 
-	/** The sequence numbers to restore to upon restore from failure */
+	/** The sequence numbers to restore to upon restore from failure. */
 	private transient HashMap<StreamShardMetadata, SequenceNumber> sequenceNumsToRestore;
 
 	private volatile boolean running = true;
@@ -108,7 +109,7 @@ public class FlinkKinesisConsumer<T> extends RichParallelSourceFunction<T> imple
 	//  State for Checkpoint
 	// ------------------------------------------------------------------------
 
-	/** State name to access shard sequence number states; cannot be changed */
+	/** State name to access shard sequence number states; cannot be changed. */
 	private static final String sequenceNumsStateStoreName = "Kinesis-Stream-Shard-State";
 
 	private transient ListState<Tuple2<StreamShardMetadata, SequenceNumber>> sequenceNumsStateForCheckpoint;
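
As an aside on how these documented fields fit together (not part of the patch): the stream names, configuration properties, and deserialization schema are all supplied at construction time. A minimal sketch, assuming the single-stream constructor FlinkKinesisConsumer(String, DeserializationSchema, Properties), a placeholder stream name, and the property keys defined in the config classes further down (ConsumerConfigConstants extends AWSConfigConstants, so the AWS keys are reachable from either class):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

import java.util.Properties;

public class ConsumerSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Region and BASIC credentials; the key strings are the constants shown in the config diffs below.
		Properties config = new Properties();
		config.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");
		config.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, "your-access-key-id");
		config.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, "your-secret-key");

		// "flink-test" is a placeholder stream name.
		DataStream<String> kinesis = env.addSource(
			new FlinkKinesisConsumer<>("flink-test", new SimpleStringSchema(), config));

		kinesis.print();
		env.execute();
	}
}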

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
index 579bd6b..04d7055 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
@@ -14,16 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis;
 
-import com.amazonaws.services.kinesis.producer.Attempt;
-import com.amazonaws.services.kinesis.producer.KinesisProducer;
-import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration;
-import com.amazonaws.services.kinesis.producer.UserRecordFailedException;
-import com.amazonaws.services.kinesis.producer.UserRecordResult;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.flink.api.java.ClosureCleaner;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
@@ -33,6 +26,15 @@ import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
 import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
 import org.apache.flink.util.PropertiesUtil;
+
+import com.amazonaws.services.kinesis.producer.Attempt;
+import com.amazonaws.services.kinesis.producer.KinesisProducer;
+import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration;
+import com.amazonaws.services.kinesis.producer.UserRecordFailedException;
+import com.amazonaws.services.kinesis.producer.UserRecordResult;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -70,10 +72,8 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 	/* Optional custom partitioner */
 	private KinesisPartitioner<OUT> customPartitioner = null;
 
-
 	// --------------------------- Runtime fields ---------------------------
 
-
 	/* Our Kinesis instance for each parallel Flink sink */
 	private transient KinesisProducer producer;
 
@@ -83,10 +83,8 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 	/* Field for async exception */
 	private transient volatile Throwable thrownException;
 
-
 	// --------------------------- Initialization and configuration  ---------------------------
 
-
 	/**
 	 * Create a new FlinkKinesisProducer.
 	 * This is a constructor supporting Flink's {@see SerializationSchema}.
@@ -104,6 +102,7 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 				return ByteBuffer.wrap(schema.serialize(element));
 			}
 			// use default stream and hash key
+
 			@Override
 			public String getTargetStream(OUT element) {
 				return null;
@@ -147,7 +146,7 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 	}
 
 	/**
-	 * Set default partition id
+	 * Set default partition id.
 	 * @param defaultPartition Name of the default partition
 	 */
 	public void setDefaultPartition(String defaultPartition) {
@@ -160,10 +159,8 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 		this.customPartitioner = partitioner;
 	}
 
-
 	// --------------------------- Lifecycle methods ---------------------------
 
-
 	@Override
 	public void open(Configuration parameters) throws Exception {
 		super.open(parameters);
@@ -186,7 +183,7 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 			@Override
 			public void onSuccess(UserRecordResult result) {
 				if (!result.isSuccessful()) {
-					if(failOnError) {
+					if (failOnError) {
 						thrownException = new RuntimeException("Record was not sent successful");
 					} else {
 						LOG.warn("Record was not sent successful");
@@ -222,7 +219,7 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> {
 				List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts();
 				for (Attempt attempt: attempts) {
 					if (attempt.getErrorMessage() != null) {
-						errorMessages += attempt.getErrorMessage() +"\n";
+						errorMessages += attempt.getErrorMessage() + "\n";
 					}
 				}
 			}
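
As an illustrative aside (not part of the patch), a minimal sketch of configuring the producer sink. Only setDefaultPartition and the failOnError flag are visible in the hunk above; the constructor form and the other setter names used here are assumptions:

import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
import org.apache.flink.streaming.connectors.kinesis.config.ProducerConfigConstants;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

import java.util.Properties;

public class ProducerSketch {
	public static FlinkKinesisProducer<String> createSink() {
		Properties config = new Properties();
		// AWS_REGION is inherited from AWSConfigConstants (ProducerConfigConstants extends it).
		config.setProperty(ProducerConfigConstants.AWS_REGION, "us-east-1");

		// Constructor supporting a plain SerializationSchema, as the Javadoc above describes.
		FlinkKinesisProducer<String> sink =
			new FlinkKinesisProducer<>(new SimpleStringSchema(), config);

		// Assumed setter names; only setDefaultPartition appears in this hunk.
		sink.setFailOnError(true);
		sink.setDefaultStream("flink-test");
		sink.setDefaultPartition("0");
		return sink;
	}
}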

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
index bd23abe..6af01c9 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
@@ -14,22 +14,27 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.kinesis;
 
+package org.apache.flink.streaming.connectors.kinesis;
 
 import java.io.Serializable;
 
+/**
+ * An interface for partitioning records.
+ *
+ * @param <T> record type
+ */
 public abstract class KinesisPartitioner<T> implements Serializable {
 
 	/**
-	 * Return a partition id based on the input
+	 * Return a partition id based on the input.
 	 * @param element Element to partition
 	 * @return A string representing the partition id
 	 */
 	public abstract String getPartitionId(T element);
 
 	/**
-	 * Optional method for setting an explicit hash key
+	 * Optional method for setting an explicit hash key.
 	 * @param element Element to get the hash key for
 	 * @return the hash key for the element
 	 */
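
The new class-level Javadoc spells out the partitioning contract. A minimal sketch of a custom partitioner (illustrative, not from the patch), overriding only the abstract getPartitionId shown above and assuming the optional hash-key method keeps a default implementation, as its "Optional method" Javadoc suggests:

import org.apache.flink.streaming.connectors.kinesis.KinesisPartitioner;

/**
 * Illustrative partitioner that derives the partition id from the record prefix,
 * e.g. the sequence number emitted by the EventsGenerator example further down.
 */
public class PrefixPartitioner extends KinesisPartitioner<String> {

	@Override
	public String getPartitionId(String element) {
		// Everything before the first dash becomes the partition id.
		int dash = element.indexOf('-');
		return dash > 0 ? element.substring(0, dash) : element;
	}
}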

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
index 01d4f00..eb14fc0 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
@@ -20,7 +20,7 @@ package org.apache.flink.streaming.connectors.kinesis.config;
 import com.amazonaws.auth.AWSCredentialsProvider;
 
 /**
- * Configuration keys for AWS service usage
+ * Configuration keys for AWS service usage.
  */
 public class AWSConfigConstants {
 
@@ -30,41 +30,41 @@ public class AWSConfigConstants {
 	 */
 	public enum CredentialProvider {
 
-		/** Look for the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to create AWS credentials */
+		/** Look for the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to create AWS credentials. */
 		ENV_VAR,
 
-		/** Look for Java system properties aws.accessKeyId and aws.secretKey to create AWS credentials */
+		/** Look for Java system properties aws.accessKeyId and aws.secretKey to create AWS credentials. */
 		SYS_PROP,
 
-		/** Use a AWS credentials profile file to create the AWS credentials */
+		/** Use a AWS credentials profile file to create the AWS credentials. */
 		PROFILE,
 
-		/** Simply create AWS credentials by supplying the AWS access key ID and AWS secret key in the configuration properties */
+		/** Simply create AWS credentials by supplying the AWS access key ID and AWS secret key in the configuration properties. */
 		BASIC,
 
-		/** A credentials provider chain will be used that searches for credentials in this order: ENV_VARS, SYS_PROPS, PROFILE in the AWS instance metadata **/
+		/** A credentials provider chain will be used that searches for credentials in this order: ENV_VARS, SYS_PROPS, PROFILE in the AWS instance metadata. **/
 		AUTO,
 	}
 
-	/** The AWS region of the Kinesis streams to be pulled ("us-east-1" is used if not set) */
+	/** The AWS region of the Kinesis streams to be pulled ("us-east-1" is used if not set). */
 	public static final String AWS_REGION = "aws.region";
 
-	/** The AWS access key ID to use when setting credentials provider type to BASIC */
+	/** The AWS access key ID to use when setting credentials provider type to BASIC. */
 	public static final String AWS_ACCESS_KEY_ID = "aws.credentials.provider.basic.accesskeyid";
 
-	/** The AWS secret key to use when setting credentials provider type to BASIC */
+	/** The AWS secret key to use when setting credentials provider type to BASIC. */
 	public static final String AWS_SECRET_ACCESS_KEY = "aws.credentials.provider.basic.secretkey";
 
-	/** The credential provider type to use when AWS credentials are required (BASIC is used if not set)*/
+	/** The credential provider type to use when AWS credentials are required (BASIC is used if not set). */
 	public static final String AWS_CREDENTIALS_PROVIDER = "aws.credentials.provider";
 
-	/** Optional configuration for profile path if credential provider type is set to be PROFILE */
+	/** Optional configuration for profile path if credential provider type is set to be PROFILE. */
 	public static final String AWS_PROFILE_PATH = "aws.credentials.provider.profile.path";
 
-	/** Optional configuration for profile name if credential provider type is set to be PROFILE */
+	/** Optional configuration for profile name if credential provider type is set to be PROFILE. */
 	public static final String AWS_PROFILE_NAME = "aws.credentials.provider.profile.name";
 
-	/** The AWS endpoint for Kinesis (derived from the AWS region setting if not set) */
+	/** The AWS endpoint for Kinesis (derived from the AWS region setting if not set). */
 	public static final String AWS_ENDPOINT = "aws.endpoint";
 
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index 7c31af4..8362776 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -17,13 +17,14 @@
 
 package org.apache.flink.streaming.connectors.kinesis.config;
 
-import com.amazonaws.services.kinesis.model.ShardIteratorType;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+
 /**
- * Optional consumer specific configuration keys and default values for {@link FlinkKinesisConsumer}
+ * Optional consumer specific configuration keys and default values for {@link FlinkKinesisConsumer}.
  */
 public class ConsumerConfigConstants extends AWSConfigConstants {
 
@@ -33,13 +34,13 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 	 */
 	public enum InitialPosition {
 
-		/** Start reading from the earliest possible record in the stream (excluding expired data records) */
+		/** Start reading from the earliest possible record in the stream (excluding expired data records). */
 		TRIM_HORIZON(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM),
 
-		/** Start reading from the latest incoming record */
+		/** Start reading from the latest incoming record. */
 		LATEST(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM),
 
-		/** Start reading from the record at the specified timestamp */
+		/** Start reading from the record at the specified timestamp. */
 		AT_TIMESTAMP(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM);
 
 		private SentinelSequenceNumber sentinelSequenceNumber;
@@ -53,55 +54,55 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 		}
 	}
 
-	/** The initial position to start reading Kinesis streams from (LATEST is used if not set) */
+	/** The initial position to start reading Kinesis streams from (LATEST is used if not set). */
 	public static final String STREAM_INITIAL_POSITION = "flink.stream.initpos";
 
-	/** The initial timestamp to start reading Kinesis stream from (when AT_TIMESTAMP is set for STREAM_INITIAL_POSITION) */
+	/** The initial timestamp to start reading Kinesis stream from (when AT_TIMESTAMP is set for STREAM_INITIAL_POSITION). */
 	public static final String STREAM_INITIAL_TIMESTAMP = "flink.stream.initpos.timestamp";
 
-	/** The date format of initial timestamp to start reading Kinesis stream from (when AT_TIMESTAMP is set for STREAM_INITIAL_POSITION) */
+	/** The date format of initial timestamp to start reading Kinesis stream from (when AT_TIMESTAMP is set for STREAM_INITIAL_POSITION). */
 	public static final String STREAM_TIMESTAMP_DATE_FORMAT = "flink.stream.initpos.timestamp.format";
 
-	/** The base backoff time between each describeStream attempt */
+	/** The base backoff time between each describeStream attempt. */
 	public static final String STREAM_DESCRIBE_BACKOFF_BASE = "flink.stream.describe.backoff.base";
 
-	/** The maximum backoff time between each describeStream attempt */
+	/** The maximum backoff time between each describeStream attempt. */
 	public static final String STREAM_DESCRIBE_BACKOFF_MAX = "flink.stream.describe.backoff.max";
 
-	/** The power constant for exponential backoff between each describeStream attempt */
+	/** The power constant for exponential backoff between each describeStream attempt. */
 	public static final String STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT = "flink.stream.describe.backoff.expconst";
 
-	/** The maximum number of records to try to get each time we fetch records from a AWS Kinesis shard */
+	/** The maximum number of records to try to get each time we fetch records from a AWS Kinesis shard. */
 	public static final String SHARD_GETRECORDS_MAX = "flink.shard.getrecords.maxrecordcount";
 
-	/** The maximum number of getRecords attempts if we get ProvisionedThroughputExceededException */
+	/** The maximum number of getRecords attempts if we get ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETRECORDS_RETRIES = "flink.shard.getrecords.maxretries";
 
-	/** The base backoff time between getRecords attempts if we get a ProvisionedThroughputExceededException */
+	/** The base backoff time between getRecords attempts if we get a ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETRECORDS_BACKOFF_BASE = "flink.shard.getrecords.backoff.base";
 
-	/** The maximum backoff time between getRecords attempts if we get a ProvisionedThroughputExceededException */
+	/** The maximum backoff time between getRecords attempts if we get a ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETRECORDS_BACKOFF_MAX = "flink.shard.getrecords.backoff.max";
 
-	/** The power constant for exponential backoff between each getRecords attempt */
+	/** The power constant for exponential backoff between each getRecords attempt. */
 	public static final String SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT = "flink.shard.getrecords.backoff.expconst";
 
-	/** The interval between each getRecords request to a AWS Kinesis shard in milliseconds */
+	/** The interval between each getRecords request to a AWS Kinesis shard in milliseconds. */
 	public static final String SHARD_GETRECORDS_INTERVAL_MILLIS = "flink.shard.getrecords.intervalmillis";
 
-	/** The maximum number of getShardIterator attempts if we get ProvisionedThroughputExceededException */
+	/** The maximum number of getShardIterator attempts if we get ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETITERATOR_RETRIES = "flink.shard.getiterator.maxretries";
 
-	/** The base backoff time between getShardIterator attempts if we get a ProvisionedThroughputExceededException */
+	/** The base backoff time between getShardIterator attempts if we get a ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETITERATOR_BACKOFF_BASE = "flink.shard.getiterator.backoff.base";
 
-	/** The maximum backoff time between getShardIterator attempts if we get a ProvisionedThroughputExceededException */
+	/** The maximum backoff time between getShardIterator attempts if we get a ProvisionedThroughputExceededException. */
 	public static final String SHARD_GETITERATOR_BACKOFF_MAX = "flink.shard.getiterator.backoff.max";
 
-	/** The power constant for exponential backoff between each getShardIterator attempt */
+	/** The power constant for exponential backoff between each getShardIterator attempt. */
 	public static final String SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT = "flink.shard.getiterator.backoff.expconst";
 
-	/** The interval between each attempt to discover new shards */
+	/** The interval between each attempt to discover new shards. */
 	public static final String SHARD_DISCOVERY_INTERVAL_MILLIS = "flink.shard.discovery.intervalmillis";
 
 	// ------------------------------------------------------------------------
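
For orientation (not part of the patch), a minimal sketch of combining some of these keys in a consumer configuration. Only constants that appear in this hunk are referenced; the values are illustrative:

import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

import java.util.Properties;

public class ConsumerTuningSketch {
	public static Properties tuningProperties() {
		Properties config = new Properties();
		// Start from a fixed timestamp instead of the default LATEST position.
		config.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "AT_TIMESTAMP");
		config.setProperty(ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP, "2017-05-28T00:00:00");
		config.setProperty(ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT, "yyyy-MM-dd'T'HH:mm:ss");

		// Poll each shard once per second, fetching at most 1000 records per getRecords call.
		config.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, "1000");
		config.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_MAX, "1000");
		return config;
	}
}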

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
index 1edddfc..d131150 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
@@ -20,7 +20,7 @@ package org.apache.flink.streaming.connectors.kinesis.config;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
 
 /**
- * Optional producer specific configuration keys for {@link FlinkKinesisProducer}
+ * Optional producer specific configuration keys for {@link FlinkKinesisProducer}.
  */
 public class ProducerConfigConstants extends AWSConfigConstants {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java
index 55668c6..b1ac057 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis.examples;
 
 import org.apache.flink.api.java.utils.ParameterTool;
@@ -26,7 +27,7 @@ import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import java.util.Properties;
 
 /**
- * This is an example on how to consume data from Kinesis
+ * This is an example on how to consume data from Kinesis.
  */
 public class ConsumeFromKinesis {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java
index d178137..ee031eb 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java
@@ -14,9 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis.examples;
 
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -25,10 +25,12 @@ import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
 import org.apache.flink.streaming.connectors.kinesis.config.ProducerConfigConstants;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 
+import org.apache.commons.lang3.RandomStringUtils;
+
 import java.util.Properties;
 
 /**
- * This is an example on how to produce data into Kinesis
+ * This is an example on how to produce data into Kinesis.
  */
 public class ProduceIntoKinesis {
 
@@ -57,13 +59,16 @@ public class ProduceIntoKinesis {
 		see.execute();
 	}
 
+	/**
+	 * Data generator that creates strings starting with a sequence number followed by a dash and 12 random characters.
+	 */
 	public static class EventsGenerator implements SourceFunction<String> {
 		private boolean running = true;
 
 		@Override
 		public void run(SourceContext<String> ctx) throws Exception {
 			long seq = 0;
-			while(running) {
+			while (running) {
 				Thread.sleep(10);
 				ctx.collect((seq++) + "-" + RandomStringUtils.randomAlphabetic(12));
 			}
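
As an aside (not part of the patch), the documented generator can be wired straight into a Kinesis sink. A minimal sketch, reusing the hypothetical ProducerSketch helper from the FlinkKinesisProducer aside earlier in this digest:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kinesis.examples.ProduceIntoKinesis;

public class GeneratorPipelineSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Strings of the form "<seq>-<12 random letters>", emitted every 10 ms by the generator above.
		DataStream<String> events = env.addSource(new ProduceIntoKinesis.EventsGenerator());

		// ProducerSketch is the hypothetical helper sketched after the FlinkKinesisProducer diff.
		events.addSink(ProducerSketch.createSink());

		env.execute();
	}
}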

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index 11ac6d4..bbfbb20 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -17,30 +17,30 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import com.amazonaws.services.kinesis.model.HashKeyRange;
-import com.amazonaws.services.kinesis.model.SequenceNumberRange;
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.util.InstantiationUtil;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
@@ -77,10 +77,10 @@ public class KinesisDataFetcher<T> {
 	//  Consumer-wide settings
 	// ------------------------------------------------------------------------
 
-	/** Configuration properties for the Flink Kinesis Consumer */
+	/** Configuration properties for the Flink Kinesis Consumer. */
 	private final Properties configProps;
 
-	/** The list of Kinesis streams that the consumer is subscribing to */
+	/** The list of Kinesis streams that the consumer is subscribing to. */
 	private final List<String> streams;
 
 	/**
@@ -94,7 +94,7 @@ public class KinesisDataFetcher<T> {
 	//  Subtask-specific settings
 	// ------------------------------------------------------------------------
 
-	/** Runtime context of the subtask that this fetcher was created in */
+	/** Runtime context of the subtask that this fetcher was created in. */
 	private final RuntimeContext runtimeContext;
 
 	private final int totalNumberOfConsumerSubtasks;
@@ -105,7 +105,7 @@ public class KinesisDataFetcher<T> {
 	//  Executor services to run created threads
 	// ------------------------------------------------------------------------
 
-	/** Executor service to run {@link ShardConsumer}s to consume Kinesis shards */
+	/** Executor service to run {@link ShardConsumer}s to consume Kinesis shards. */
 	private final ExecutorService shardConsumersExecutor;
 
 	// ------------------------------------------------------------------------
@@ -135,22 +135,22 @@ public class KinesisDataFetcher<T> {
 
 	private final SourceFunction.SourceContext<T> sourceContext;
 
-	/** Checkpoint lock, also used to synchronize operations on subscribedShardsState */
+	/** Checkpoint lock, also used to synchronize operations on subscribedShardsState. */
 	private final Object checkpointLock;
 
-	/** Reference to the first error thrown by any of the {@link ShardConsumer} threads */
+	/** Reference to the first error thrown by any of the {@link ShardConsumer} threads. */
 	private final AtomicReference<Throwable> error;
 
-	/** The Kinesis proxy that the fetcher will be using to discover new shards */
+	/** The Kinesis proxy that the fetcher will be using to discover new shards. */
 	private final KinesisProxyInterface kinesis;
 
-	/** Thread that executed runFetcher() */
+	/** Thread that executed runFetcher(). */
 	private volatile Thread mainThread;
 
 	/**
 	 * The current number of shards that are actively read by this fetcher.
 	 *
-	 * This value is updated in {@link KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)},
+	 * <p>This value is updated in {@link KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)},
 	 * and {@link KinesisDataFetcher#updateState(int, SequenceNumber)}.
 	 */
 	private final AtomicInteger numberOfActiveShards = new AtomicInteger(0);
@@ -183,7 +183,7 @@ public class KinesisDataFetcher<T> {
 			KinesisProxy.create(configProps));
 	}
 
-	/** This constructor is exposed for testing purposes */
+	/** This constructor is exposed for testing purposes. */
 	protected KinesisDataFetcher(List<String> streams,
 								SourceFunction.SourceContext<T> sourceContext,
 								Object checkpointLock,
@@ -381,9 +381,9 @@ public class KinesisDataFetcher<T> {
 		shardConsumersExecutor.shutdownNow();
 	}
 
-	/** After calling {@link KinesisDataFetcher#shutdownFetcher()}, this can be called to await the fetcher shutdown */
+	/** After calling {@link KinesisDataFetcher#shutdownFetcher()}, this can be called to await the fetcher shutdown. */
 	public void awaitTermination() throws InterruptedException {
-		while(!shardConsumersExecutor.isTerminated()) {
+		while (!shardConsumersExecutor.isTerminated()) {
 			Thread.sleep(50);
 		}
 	}
@@ -400,7 +400,7 @@ public class KinesisDataFetcher<T> {
 	//  Functions that update the subscribedStreamToLastDiscoveredShardIds state
 	// ------------------------------------------------------------------------
 
-	/** Updates the last discovered shard of a subscribed stream; only updates if the update is valid */
+	/** Updates the last discovered shard of a subscribed stream; only updates if the update is valid. */
 	public void advanceLastDiscoveredShardOfStream(String stream, String shardId) {
 		String lastSeenShardIdOfStream = this.subscribedStreamsToLastDiscoveredShardIds.get(stream);
 
@@ -417,7 +417,7 @@ public class KinesisDataFetcher<T> {
 	/**
 	 * A utility function that does the following:
 	 *
-	 * 1. Find new shards for each stream that we haven't seen before
+	 * <p>1. Find new shards for each stream that we haven't seen before
 	 * 2. For each new shard, determine whether this consumer subtask should subscribe to them;
 	 * 	  if yes, it is added to the returned list of shards
 	 * 3. Update the subscribedStreamsToLastDiscoveredShardIds state so that we won't get shards
@@ -538,7 +538,7 @@ public class KinesisDataFetcher<T> {
 				this.numberOfActiveShards.incrementAndGet();
 			}
 
-			return subscribedShardsState.size()-1;
+			return subscribedShardsState.size() - 1;
 		}
 	}
 
@@ -574,7 +574,7 @@ public class KinesisDataFetcher<T> {
 
 	/**
 	 * Utility function to create an initial map of the last discovered shard id of each subscribed stream, set to null;
-	 * This is called in the constructor; correct values will be set later on by calling advanceLastDiscoveredShardOfStream()
+	 * This is called in the constructor; correct values will be set later on by calling advanceLastDiscoveredShardOfStream().
 	 *
 	 * @param streams the list of subscribed streams
 	 * @return the initial map for subscribedStreamsToLastDiscoveredShardIds
@@ -588,7 +588,7 @@ public class KinesisDataFetcher<T> {
 	}
 
 	/**
-	 * Utility function to convert {@link StreamShardHandle} into {@link StreamShardMetadata}
+	 * Utility function to convert {@link StreamShardHandle} into {@link StreamShardMetadata}.
 	 *
 	 * @param streamShardHandle the {@link StreamShardHandle} to be converted
 	 * @return a {@link StreamShardMetadata} object
@@ -615,7 +615,7 @@ public class KinesisDataFetcher<T> {
 	}
 
 	/**
-	 * Utility function to convert {@link StreamShardMetadata} into {@link StreamShardHandle}
+	 * Utility function to convert {@link StreamShardMetadata} into {@link StreamShardHandle}.
 	 *
 	 * @param streamShardMetadata the {@link StreamShardMetadata} to be converted
 	 * @return a {@link StreamShardHandle} object

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index a724b49..2d48e5f 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -17,19 +17,20 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
-import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
-import com.amazonaws.services.kinesis.model.GetRecordsResult;
-import com.amazonaws.services.kinesis.model.Record;
-import com.amazonaws.services.kinesis.model.ShardIteratorType;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
-import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -88,7 +89,7 @@ public class ShardConsumer<T> implements Runnable {
 			KinesisProxy.create(fetcherRef.getConsumerConfiguration()));
 	}
 
-	/** This constructor is exposed for testing purposes */
+	/** This constructor is exposed for testing purposes. */
 	protected ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 							Integer subscribedShardStateIndex,
 							StreamShardHandle subscribedShard,
@@ -186,7 +187,7 @@ public class ShardConsumer<T> implements Runnable {
 				}
 			}
 
-			while(isRunning()) {
+			while (isRunning()) {
 				if (nextShardItr == null) {
 					fetcherRef.updateState(subscribedShardStateIndex, SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
 
@@ -233,7 +234,7 @@ public class ShardConsumer<T> implements Runnable {
 	 * {@link ShardConsumer#getRecords(String, int)} may be able to use the correct sequence number to refresh shard
 	 * iterators if necessary.
 	 *
-	 * Note that the server-side Kinesis timestamp is attached to the record when collected. When the
+	 * <p>Note that the server-side Kinesis timestamp is attached to the record when collected. When the
 	 * user programs uses {@link TimeCharacteristic#EventTime}, this timestamp will be used by default.
 	 *
 	 * @param record record to deserialize and collect
@@ -275,7 +276,7 @@ public class ShardConsumer<T> implements Runnable {
 	 * such occasions. The returned shard iterator within the successful {@link GetRecordsResult} should
 	 * be used for the next call to this method.
 	 *
-	 * Note: it is important that this method is not called again before all the records from the last result have been
+	 * <p>Note: it is important that this method is not called again before all the records from the last result have been
 	 * fully collected with {@link ShardConsumer#deserializeRecordForCollectionAndUpdateState(UserRecord)}, otherwise
 	 * {@link ShardConsumer#lastSequenceNum} may refer to a sub-record in the middle of an aggregated record, leading to
 	 * incorrect shard iteration if the iterator had to be refreshed.

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
index 592e30d..22bfbf5 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
@@ -37,7 +37,7 @@ public class KinesisStreamShard implements Serializable {
 	private final int cachedHash;
 
 	/**
-	 * Create a new KinesisStreamShard
+	 * Create a new KinesisStreamShard.
 	 *
 	 * @param streamName
 	 *           the name of the Kinesis stream that this shard belongs to
@@ -96,7 +96,7 @@ public class KinesisStreamShard implements Serializable {
 	}
 
 	/**
-	 * Utility function to compare two shard ids
+	 * Utility function to compare two shard ids.
 	 *
 	 * @param firstShardId first shard id to compare
 	 * @param secondShardId second shard id to compare
@@ -126,7 +126,9 @@ public class KinesisStreamShard implements Serializable {
 	 * @return whether the shard id is valid
 	 */
 	public static boolean isValidShardId(String shardId) {
-		if (shardId == null) { return false; }
+		if (shardId == null) {
+			return false;
+		}
 		return shardId.matches("^shardId-\\d{12}");
 	}
 
@@ -148,7 +150,7 @@ public class KinesisStreamShard implements Serializable {
 			streamShardMetadata.setStartingHashKey(kinesisStreamShard.getShard().getHashKeyRange().getStartingHashKey());
 			streamShardMetadata.setEndingHashKey(kinesisStreamShard.getShard().getHashKeyRange().getEndingHashKey());
 		}
-		
+
 		if (kinesisStreamShard.getShard().getSequenceNumberRange() != null) {
 			streamShardMetadata.setStartingSequenceNumber(kinesisStreamShard.getShard().getSequenceNumberRange().getStartingSequenceNumber());
 			streamShardMetadata.setEndingSequenceNumber(kinesisStreamShard.getShard().getSequenceNumberRange().getEndingSequenceNumber());

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
index 4b1cc1c..fbd2e47 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
@@ -17,9 +17,10 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.util.Preconditions;
 
+import com.amazonaws.services.kinesis.model.Shard;
+
 /**
  * A wrapper class that bundles a {@link StreamShardHandle} with its last processed sequence number.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
index 7f9dbbb..a5398e4 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
@@ -28,20 +28,20 @@ import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetche
 public enum SentinelSequenceNumber {
 
 	/** Flag value for shard's sequence numbers to indicate that the
-	 * shard should start to be read from the latest incoming records */
-	SENTINEL_LATEST_SEQUENCE_NUM( new SequenceNumber("LATEST_SEQUENCE_NUM") ),
+	 * shard should start to be read from the latest incoming records. */
+	SENTINEL_LATEST_SEQUENCE_NUM(new SequenceNumber("LATEST_SEQUENCE_NUM")),
 
 	/** Flag value for shard's sequence numbers to indicate that the shard should
-	 * start to be read from the earliest records that haven't expired yet */
-	SENTINEL_EARLIEST_SEQUENCE_NUM( new SequenceNumber("EARLIEST_SEQUENCE_NUM") ),
+	 * start to be read from the earliest records that haven't expired yet. */
+	SENTINEL_EARLIEST_SEQUENCE_NUM(new SequenceNumber("EARLIEST_SEQUENCE_NUM")),
 
 	/** Flag value for shard's sequence numbers to indicate that the shard should
-	 * start to be read from the specified timestamp */
-	SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM( new SequenceNumber("AT_TIMESTAMP_SEQUENCE_NUM") ),
+	 * start to be read from the specified timestamp. */
+	SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM(new SequenceNumber("AT_TIMESTAMP_SEQUENCE_NUM")),
 
 	/** Flag value to indicate that we have already read the last record of this shard
-	 * (Note: Kinesis shards that have been closed due to a split or merge will have an ending data record) */
-	SENTINEL_SHARD_ENDING_SEQUENCE_NUM( new SequenceNumber("SHARD_ENDING_SEQUENCE_NUM") );
+	 * (Note: Kinesis shards that have been closed due to a split or merge will have an ending data record). */
+	SENTINEL_SHARD_ENDING_SEQUENCE_NUM(new SequenceNumber("SHARD_ENDING_SEQUENCE_NUM"));
 
 	private SequenceNumber sentinel;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
index d340a88..767c227 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
@@ -34,7 +34,7 @@ public class StreamShardHandle {
 	private final int cachedHash;
 
 	/**
-	 * Create a new StreamShardHandle
+	 * Create a new StreamShardHandle.
 	 *
 	 * @param streamName
 	 *           the name of the Kinesis stream that this shard belongs to
@@ -93,7 +93,7 @@ public class StreamShardHandle {
 	}
 
 	/**
-	 * Utility function to compare two shard ids
+	 * Utility function to compare two shard ids.
 	 *
 	 * @param firstShardId first shard id to compare
 	 * @param secondShardId second shard id to compare
@@ -123,7 +123,9 @@ public class StreamShardHandle {
 	 * @return whether the shard id is valid
 	 */
 	public static boolean isValidShardId(String shardId) {
-		if (shardId == null) { return false; }
+		if (shardId == null) {
+			return false;
+		}
 		return shardId.matches("^shardId-\\d{12}");
 	}
 }
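
The reflowed null guard leaves the behaviour unchanged; a quick illustrative check (not part of the patch):

import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;

public class ShardIdCheckSketch {
	public static void main(String[] args) {
		// A valid id is "shardId-" followed by exactly twelve digits.
		System.out.println(StreamShardHandle.isValidShardId(null));                   // false (null guard)
		System.out.println(StreamShardHandle.isValidShardId("shardId-000000000042")); // true
		System.out.println(StreamShardHandle.isValidShardId("shard-42"));             // false
	}
}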

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
index aadb31c..fcfb3ac 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
@@ -19,9 +19,9 @@ package org.apache.flink.streaming.connectors.kinesis.proxy;
 
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 70c1286..89e9f04 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -17,34 +17,36 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.services.kinesis.AmazonKinesisClient;
 import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
 import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import com.amazonaws.services.kinesis.model.GetRecordsRequest;
 import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
 import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
 import com.amazonaws.services.kinesis.model.LimitExceededException;
 import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
 import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
-import com.amazonaws.services.kinesis.model.StreamStatus;
 import com.amazonaws.services.kinesis.model.Shard;
-import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
 import com.amazonaws.services.kinesis.model.ShardIteratorType;
-import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
-import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+import com.amazonaws.services.kinesis.model.StreamStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Properties;
 import java.util.Map;
+import java.util.Properties;
 import java.util.Random;
-import java.util.Date;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
@@ -53,7 +55,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * calls to AWS Kinesis for several functions, such as getting a list of shards and
  * fetching a batch of data records starting from a specified record sequence number.
  *
- * NOTE:
+ * <p>NOTE:
  * In the AWS KCL library, there is a similar implementation - {@link com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy}.
  * This implementation differs mainly in that we can make operations to arbitrary Kinesis streams, which is a needed
  * functionality for the Flink Kinesis Connecter since the consumer may simultaneously read from multiple Kinesis streams.
@@ -62,59 +64,59 @@ public class KinesisProxy implements KinesisProxyInterface {
 
 	private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
 
-	/** The actual Kinesis client from the AWS SDK that we will be using to make calls */
+	/** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
 	private final AmazonKinesisClient kinesisClient;
 
-	/** Random seed used to calculate backoff jitter for Kinesis operations */
-	private final static Random seed = new Random();
+	/** Random seed used to calculate backoff jitter for Kinesis operations. */
+	private static final Random seed = new Random();
 
 	// ------------------------------------------------------------------------
 	//  describeStream() related performance settings
 	// ------------------------------------------------------------------------
 
-	/** Base backoff millis for the describe stream operation */
+	/** Base backoff millis for the describe stream operation. */
 	private final long describeStreamBaseBackoffMillis;
 
-	/** Maximum backoff millis for the describe stream operation */
+	/** Maximum backoff millis for the describe stream operation. */
 	private final long describeStreamMaxBackoffMillis;
 
-	/** Exponential backoff power constant for the describe stream operation */
+	/** Exponential backoff power constant for the describe stream operation. */
 	private final double describeStreamExpConstant;
 
 	// ------------------------------------------------------------------------
 	//  getRecords() related performance settings
 	// ------------------------------------------------------------------------
 
-	/** Base backoff millis for the get records operation */
+	/** Base backoff millis for the get records operation. */
 	private final long getRecordsBaseBackoffMillis;
 
-	/** Maximum backoff millis for the get records operation */
+	/** Maximum backoff millis for the get records operation. */
 	private final long getRecordsMaxBackoffMillis;
 
-	/** Exponential backoff power constant for the get records operation */
+	/** Exponential backoff power constant for the get records operation. */
 	private final double getRecordsExpConstant;
 
-	/** Maximum attempts for the get records operation */
+	/** Maximum attempts for the get records operation. */
 	private final int getRecordsMaxAttempts;
 
 	// ------------------------------------------------------------------------
 	//  getShardIterator() related performance settings
 	// ------------------------------------------------------------------------
 
-	/** Base backoff millis for the get shard iterator operation */
+	/** Base backoff millis for the get shard iterator operation. */
 	private final long getShardIteratorBaseBackoffMillis;
 
-	/** Maximum backoff millis for the get shard iterator operation */
+	/** Maximum backoff millis for the get shard iterator operation. */
 	private final long getShardIteratorMaxBackoffMillis;
 
-	/** Exponential backoff power constant for the get shard iterator operation */
+	/** Exponential backoff power constant for the get shard iterator operation. */
 	private final double getShardIteratorExpConstant;
 
-	/** Maximum attempts for the get shard iterator operation */
+	/** Maximum attempts for the get shard iterator operation. */
 	private final int getShardIteratorMaxAttempts;
 
 	/**
-	 * Create a new KinesisProxy based on the supplied configuration properties
+	 * Create a new KinesisProxy based on the supplied configuration properties.
 	 *
 	 * @param configProps configuration properties containing AWS credential and AWS region info
 	 */
@@ -225,7 +227,7 @@ public class KinesisProxy implements KinesisProxyInterface {
 	public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException {
 		GetShardListResult result = new GetShardListResult();
 
-		for (Map.Entry<String,String> streamNameWithLastSeenShardId : streamNamesWithLastSeenShardIds.entrySet()) {
+		for (Map.Entry<String, String> streamNameWithLastSeenShardId : streamNamesWithLastSeenShardIds.entrySet()) {
 			String stream = streamNameWithLastSeenShardId.getKey();
 			String lastSeenShardId = streamNameWithLastSeenShardId.getValue();
 			result.addRetrievedShardsToStream(stream, getShardsOfStream(stream, lastSeenShardId));
@@ -294,7 +296,7 @@ public class KinesisProxy implements KinesisProxyInterface {
 
 	/**
 	 * Determines whether the exception is recoverable using exponential-backoff.
-	 * 
+	 *
 	 * @param ex Exception to inspect
 	 * @return <code>true</code> if the exception can be recovered from, else
 	 *         <code>false</code>
@@ -338,7 +340,7 @@ public class KinesisProxy implements KinesisProxyInterface {
 	/**
 	 * Get metainfo for a Kinesis stream, which contains information about which shards this Kinesis stream possess.
 	 *
-	 * This method is using a "full jitter" approach described in AWS's article,
+	 * <p>This method is using a "full jitter" approach described in AWS's article,
 	 * <a href="https://www.awsarchitectureblog.com/2015/03/backoff.html">"Exponential Backoff and Jitter"</a>.
 	 * This is necessary because concurrent calls will be made by all parallel subtask's fetcher. This
 	 * jitter backoff approach will help distribute calls across the fetchers over time.
@@ -395,6 +397,6 @@ public class KinesisProxy implements KinesisProxyInterface {
 
 	private static long fullJitterBackoff(long base, long max, double power, int attempt) {
 		long exponentialBackoff = (long) Math.min(max, base * Math.pow(power, attempt));
-		return (long)(seed.nextDouble()*exponentialBackoff); // random jitter between 0 and the exponential backoff
+		return (long) (seed.nextDouble() * exponentialBackoff); // random jitter between 0 and the exponential backoff
 	}
 }

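As an aside for readers following the retry logic in this file: the fullJitterBackoff helper at the end of the diff implements the "full jitter" scheme referenced in the Javadoc above. A minimal standalone sketch of the same formula follows; the base/max/power values are illustrative only, not the connector's configured defaults.

import java.util.Random;

public class FullJitterBackoffSketch {

	private static final Random RND = new Random();

	// Mirrors the formula in the diff: cap the exponential backoff at 'max',
	// then wait a uniformly random time between 0 and that cap.
	static long fullJitterBackoff(long base, long max, double power, int attempt) {
		long exponentialBackoff = (long) Math.min(max, base * Math.pow(power, attempt));
		return (long) (RND.nextDouble() * exponentialBackoff);
	}

	public static void main(String[] args) {
		long base = 1000L;    // 1 second base backoff (illustrative)
		long max = 30000L;    // 30 second cap (illustrative)
		double power = 1.5;   // exponential constant (illustrative)

		for (int attempt = 0; attempt < 5; attempt++) {
			System.out.println("attempt " + attempt + ": sleep " + fullJitterBackoff(base, max, power, attempt) + " ms");
		}
	}
}

Because each wait is drawn uniformly from [0, cappedBackoff], concurrent describeStream calls from parallel subtasks spread out over time instead of retrying in lockstep.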
http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
index 807a163..0538151 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
@@ -17,9 +17,10 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
-import com.amazonaws.services.kinesis.model.GetRecordsResult;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+
 import java.util.Map;
 
 /**
@@ -46,7 +47,7 @@ public interface KinesisProxyInterface {
 	String getShardIterator(StreamShardHandle shard, String shardIteratorType, Object startingMarker) throws InterruptedException;
 
 	/**
-	 * Get the next batch of data records using a specific shard iterator
+	 * Get the next batch of data records using a specific shard iterator.
 	 *
 	 * @param shardIterator a shard iterator that encodes info about which shard to read and where to start reading
 	 * @param maxRecordsToGet the maximum amount of records to retrieve for this batch
@@ -67,5 +68,5 @@ public interface KinesisProxyInterface {
 	 *                              operation has exceeded the rate limit; this exception will be thrown
 	 *                              if the backoff is interrupted.
 	 */
-	GetShardListResult getShardList(Map<String,String> streamNamesWithLastSeenShardIds) throws InterruptedException;
+	GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException;
 }

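To make the contract above concrete, here is a hypothetical polling loop written against KinesisProxyInterface. It is not part of the connector; the "TRIM_HORIZON" iterator type, the batch size of 100 and the record handling are assumptions for illustration, while the method signatures are the ones shown in this diff.

package org.apache.flink.streaming.connectors.kinesis.proxy;

import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;

import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.Record;

/**
 * Hypothetical usage sketch for {@link KinesisProxyInterface}; not part of the connector.
 */
public class KinesisProxyUsageSketch {

	/** Reads a shard from its oldest record until the shard is closed (null next iterator). */
	static void drainShard(KinesisProxyInterface proxy, StreamShardHandle shard) throws InterruptedException {
		// TRIM_HORIZON starts at the oldest available record, so no starting marker is needed.
		String iterator = proxy.getShardIterator(shard, "TRIM_HORIZON", null);

		while (iterator != null) {
			GetRecordsResult result = proxy.getRecords(iterator, 100); // 100 = illustrative batch size
			for (Record record : result.getRecords()) {
				System.out.println("record " + record.getSequenceNumber());
			}
			iterator = result.getNextShardIterator();
		}
	}
}

A real fetcher would additionally pause between empty polls and track the last processed sequence number, but that is outside the scope of this sketch.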
http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
index 0effdd8..b06b20f 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
@@ -33,7 +33,7 @@ import java.io.Serializable;
 public interface KinesisDeserializationSchema<T> extends Serializable, ResultTypeQueryable<T> {
 
 	/**
-	 * Deserializes a Kinesis record's bytes
+	 * Deserializes a Kinesis record's bytes.
 	 *
 	 * @param recordValue the record's value as a byte array
 	 * @param partitionKey the record's partition key at the time of writing

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
index 6e66038..279d410 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
@@ -23,7 +23,7 @@ import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import java.io.IOException;
 
 /**
- * A simple wrapper for using the {@link DeserializationSchema} with the {@link KinesisDeserializationSchema} interface
+ * A simple wrapper for using the {@link DeserializationSchema} with the {@link KinesisDeserializationSchema} interface.
  *
  * @param <T> The type created by the deserialization schema.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
index 03dd72c..9be410a 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
@@ -14,8 +14,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.kinesis.serialization;
 
+package org.apache.flink.streaming.connectors.kinesis.serialization;
 
 import java.io.Serializable;
 import java.nio.ByteBuffer;
@@ -27,7 +27,7 @@ import java.nio.ByteBuffer;
  */
 public interface KinesisSerializationSchema<T> extends Serializable {
 	/**
-	 * Serialize the given element into a ByteBuffer
+	 * Serialize the given element into a ByteBuffer.
 	 *
 	 * @param element The element to serialize
 	 * @return Serialized representation of the element

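For context, a minimal sketch of an implementation follows. The signatures ByteBuffer serialize(T element) and String getTargetStream(T element) are taken from the connector's producer-side API and are not fully visible in this diff, so treat them as assumptions.

package org.apache.flink.streaming.connectors.kinesis.serialization;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

/**
 * Illustrative KinesisSerializationSchema for plain strings; not part of the connector.
 */
public class SimpleStringKinesisSchema implements KinesisSerializationSchema<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public ByteBuffer serialize(String element) {
		// Wrap the UTF-8 bytes of the element, as described by the Javadoc above.
		return ByteBuffer.wrap(element.getBytes(StandardCharsets.UTF_8));
	}

	@Override
	public String getTargetStream(String element) {
		// Returning null routes the record to the producer's default stream (assumed behaviour).
		return null;
	}
}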
http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
index a6aad02..5670526 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
@@ -17,6 +17,10 @@
 
 package org.apache.flink.streaming.connectors.kinesis.util;
 
+import org.apache.flink.runtime.util.EnvironmentInformation;
+import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider;
+
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.ClientConfigurationFactory;
 import com.amazonaws.auth.AWSCredentials;
@@ -29,9 +33,6 @@ import com.amazonaws.auth.profile.ProfileCredentialsProvider;
 import com.amazonaws.regions.Region;
 import com.amazonaws.regions.Regions;
 import com.amazonaws.services.kinesis.AmazonKinesisClient;
-import org.apache.flink.runtime.util.EnvironmentInformation;
-import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider;
 
 import java.util.Properties;
 
@@ -126,7 +127,7 @@ public class AWSUtil {
 	}
 
 	/**
-	 * Checks whether or not a region ID is valid
+	 * Checks whether or not a region ID is valid.
 	 *
 	 * @param region The AWS region ID to check
 	 * @return true if the supplied region ID is valid, false otherwise

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
index 244f5a5..42f1af0 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
@@ -17,7 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kinesis.util;
 
-import com.amazonaws.regions.Regions;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
@@ -26,6 +25,8 @@ import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConsta
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.InitialPosition;
 import org.apache.flink.streaming.connectors.kinesis.config.ProducerConfigConstants;
 
+import com.amazonaws.regions.Regions;
+
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Properties;
@@ -141,7 +142,7 @@ public class KinesisConfigUtil {
 	}
 
 	/**
-	 * Validate configuration properties related to Amazon AWS service
+	 * Validate configuration properties related to Amazon AWS service.
 	 */
 	public static void validateAwsConfiguration(Properties config) {
 		if (config.containsKey(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER)) {

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
index e24a411..af84420 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
@@ -17,19 +17,20 @@
 
 package org.apache.flink.streaming.connectors.kinesis;
 
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.api.operators.StreamSource;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShard;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
+
+import com.amazonaws.services.kinesis.model.Shard;
 import org.junit.Test;
 
 import java.net.URL;
@@ -37,8 +38,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Properties;
 
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.mockito.Mockito.mock;
 
 /**
@@ -136,11 +137,12 @@ public class FlinkKinesisConsumerMigrationTest {
 		}
 
 		@Override
-		protected KinesisDataFetcher<T> createFetcher(List<String> streams,
-													  	SourceFunction.SourceContext<T> sourceContext,
-													  	RuntimeContext runtimeContext,
-													  	Properties configProps,
-													  	KinesisDeserializationSchema<T> deserializationSchema) {
+		protected KinesisDataFetcher<T> createFetcher(
+				List<String> streams,
+				SourceFunction.SourceContext<T> sourceContext,
+				RuntimeContext runtimeContext,
+				Properties configProps,
+				KinesisDeserializationSchema<T> deserializationSchema) {
 			return mock(KinesisDataFetcher.class);
 		}
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
index 186dfa6..a26e758 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
@@ -17,9 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kinesis;
 
-import com.amazonaws.services.kinesis.model.HashKeyRange;
-import com.amazonaws.services.kinesis.model.SequenceNumberRange;
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.common.state.ListState;
@@ -38,15 +35,19 @@ import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConsta
 import org.apache.flink.streaming.connectors.kinesis.config.ProducerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShard;
+import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
-import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.connectors.kinesis.testutils.TestableFlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -59,21 +60,21 @@ import org.powermock.api.mockito.PowerMockito;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
-import java.util.HashMap;
-import java.util.Map;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 
-import static org.junit.Assert.fail;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.never;
 
 /**
  * Suite of FlinkKinesisConsumer tests for the methods called throughout the source life cycle.

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
index 4fb6dd4..2e1adb6 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
@@ -17,21 +17,22 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import com.amazonaws.services.kinesis.model.HashKeyRange;
-import com.amazonaws.services.kinesis.model.SequenceNumberRange;
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
-import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisBehavioursFactory;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.connectors.kinesis.testutils.TestableKinesisDataFetcher;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mockito;
@@ -40,9 +41,9 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import java.util.HashMap;
-import java.util.Map;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
@@ -54,6 +55,9 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+/**
+ * Tests for the {@link KinesisDataFetcher}.
+ */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(TestableKinesisDataFetcher.class)
 public class KinesisDataFetcherTest {
@@ -92,10 +96,10 @@ public class KinesisDataFetcherTest {
 		HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
 			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);
 
-		Map<String,Integer> streamToShardCount = new HashMap<>();
+		Map<String, Integer> streamToShardCount = new HashMap<>();
 		Random rand = new Random();
 		for (String fakeStream : fakeStreams) {
-			streamToShardCount.put(fakeStream, rand.nextInt(5)+1);
+			streamToShardCount.put(fakeStream, rand.nextInt(5) + 1);
 		}
 
 		final TestableKinesisDataFetcher fetcher =
@@ -140,10 +144,10 @@ public class KinesisDataFetcherTest {
 		assertTrue(streamsInState.containsAll(fakeStreams));
 
 		// assert that the last seen shards in state is correctly set
-		for (Map.Entry<String,String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
+		for (Map.Entry<String, String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
 			assertTrue(
 				streamToLastSeenShard.getValue().equals(
-					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey())-1)));
+					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)));
 		}
 	}
 
@@ -184,7 +188,7 @@ public class KinesisDataFetcherTest {
 				new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
 			UUID.randomUUID().toString());
 
-		Map<String,Integer> streamToShardCount = new HashMap<>();
+		Map<String, Integer> streamToShardCount = new HashMap<>();
 		streamToShardCount.put("fakeStream1", 3); // fakeStream1 will still have 3 shards after restore
 		streamToShardCount.put("fakeStream2", 2); // fakeStream2 will still have 2 shards after restore
 
@@ -230,10 +234,10 @@ public class KinesisDataFetcherTest {
 		assertTrue(streamsInState.containsAll(fakeStreams));
 
 		// assert that the last seen shards in state is correctly set
-		for (Map.Entry<String,String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
+		for (Map.Entry<String, String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
 			assertTrue(
 				streamToLastSeenShard.getValue().equals(
-					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey())-1)));
+					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)));
 		}
 	}
 
@@ -274,9 +278,9 @@ public class KinesisDataFetcherTest {
 				new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
 			UUID.randomUUID().toString());
 
-		Map<String,Integer> streamToShardCount = new HashMap<>();
-		streamToShardCount.put("fakeStream1", 3+1); // fakeStream1 had 3 shards before & 1 new shard after restore
-		streamToShardCount.put("fakeStream2", 2+3); // fakeStream2 had 2 shards before & 3 new shard after restore
+		Map<String, Integer> streamToShardCount = new HashMap<>();
+		streamToShardCount.put("fakeStream1", 3 + 1); // fakeStream1 had 3 shards before & 1 new shard after restore
+		streamToShardCount.put("fakeStream2", 2 + 3); // fakeStream2 had 2 shards before & 3 new shard after restore
 
 		HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
 			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);
@@ -321,10 +325,10 @@ public class KinesisDataFetcherTest {
 		assertTrue(streamsInState.containsAll(fakeStreams));
 
 		// assert that the last seen shards in state is correctly set
-		for (Map.Entry<String,String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
+		for (Map.Entry<String, String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
 			assertTrue(
 				streamToLastSeenShard.getValue().equals(
-					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey())-1)));
+					KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)));
 		}
 	}
 
@@ -367,7 +371,7 @@ public class KinesisDataFetcherTest {
 				new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
 			UUID.randomUUID().toString());
 
-		Map<String,Integer> streamToShardCount = new HashMap<>();
+		Map<String, Integer> streamToShardCount = new HashMap<>();
 		streamToShardCount.put("fakeStream1", 3); // fakeStream1 has fixed 3 shards
 		streamToShardCount.put("fakeStream2", 2); // fakeStream2 has fixed 2 shards
 		streamToShardCount.put("fakeStream3", 0); // no shards can be found for fakeStream3
@@ -463,9 +467,9 @@ public class KinesisDataFetcherTest {
 				new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
 			UUID.randomUUID().toString());
 
-		Map<String,Integer> streamToShardCount = new HashMap<>();
-		streamToShardCount.put("fakeStream1", 3+1); // fakeStream1 had 3 shards before & 1 new shard after restore
-		streamToShardCount.put("fakeStream2", 2+3); // fakeStream2 had 2 shards before & 2 new shard after restore
+		Map<String, Integer> streamToShardCount = new HashMap<>();
+		streamToShardCount.put("fakeStream1", 3 + 1); // fakeStream1 had 3 shards before & 1 new shard after restore
+		streamToShardCount.put("fakeStream2", 2 + 3); // fakeStream2 had 2 shards before & 2 new shard after restore
 		streamToShardCount.put("fakeStream3", 0); // no shards can be found for fakeStream3
 		streamToShardCount.put("fakeStream4", 0); // no shards can be found for fakeStream4
 
@@ -569,11 +573,12 @@ public class KinesisDataFetcherTest {
 		}
 
 		@Override
-		protected KinesisDataFetcher<T> createFetcher(List<String> streams,
-													  SourceFunction.SourceContext<T> sourceContext,
-													  RuntimeContext runtimeContext,
-													  Properties configProps,
-													  KinesisDeserializationSchema<T> deserializationSchema) {
+		protected KinesisDataFetcher<T> createFetcher(
+				List<String> streams,
+				SourceFunction.SourceContext<T> sourceContext,
+				RuntimeContext runtimeContext,
+				Properties configProps,
+				KinesisDeserializationSchema<T> deserializationSchema) {
 			return fetcher;
 		}
 


[17/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-filesystem

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFrom12MigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFrom12MigrationTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFrom12MigrationTest.java
index 350b7b4..e3db8bb 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFrom12MigrationTest.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFrom12MigrationTest.java
@@ -15,15 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import static org.junit.Assert.assertTrue;
+package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.OperatorStateStore;
 import org.apache.flink.runtime.state.FunctionInitializationContext;
@@ -33,6 +27,8 @@ import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.OperatorSnapshotUtil;
+
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.Path;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -40,6 +36,13 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertTrue;
+
 /**
  * Tests for checking whether {@link BucketingSink} can restore from snapshots that were done
  * using the Flink 1.2 {@link BucketingSink}.

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkTest.java
index 090c54a..67af91f 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkTest.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkTest.java
@@ -15,15 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import org.apache.avro.Schema;
-import org.apache.avro.file.DataFileConstants;
-import org.apache.avro.file.DataFileStream;
-import org.apache.avro.generic.GenericData;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.specific.SpecificDatumReader;
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeinfo.TypeHint;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -39,6 +33,14 @@ import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
 import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
 import org.apache.flink.util.NetUtils;
+
+import org.apache.avro.Schema;
+import org.apache.avro.file.DataFileConstants;
+import org.apache.avro.file.DataFileStream;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.specific.SpecificDatumReader;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,10 +52,10 @@ import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
-import org.junit.Assert;
 import org.junit.rules.TemporaryFolder;
 
 import java.io.BufferedReader;
@@ -63,6 +65,9 @@ import java.io.InputStreamReader;
 import java.util.HashMap;
 import java.util.Map;
 
+/**
+ * Tests for the {@link BucketingSink}.
+ */
 public class BucketingSinkTest {
 	@ClassRule
 	public static TemporaryFolder tempFolder = new TemporaryFolder();
@@ -115,8 +120,8 @@ public class BucketingSinkTest {
 			.setWriter(new StringWriter<String>())
 			.setPartPrefix(PART_PREFIX)
 			.setPendingPrefix("")
-			.setInactiveBucketCheckInterval(5*60*1000L)
-			.setInactiveBucketThreshold(5*60*1000L)
+			.setInactiveBucketCheckInterval(5 * 60 * 1000L)
+			.setInactiveBucketThreshold(5 * 60 * 1000L)
 			.setPendingSuffix(PENDING_SUFFIX)
 			.setInProgressSuffix(IN_PROGRESS_SUFFIX);
 
@@ -175,7 +180,7 @@ public class BucketingSinkTest {
 
 		testHarness.processElement(new StreamRecord<>("test1", 1L));
 		testHarness.processElement(new StreamRecord<>("test2", 1L));
-		checkFs(outDir, 2, 0 ,0, 0);
+		checkFs(outDir, 2, 0 , 0, 0);
 
 		testHarness.setProcessingTime(101L);	// put some in pending
 		checkFs(outDir, 0, 2, 0, 0);
@@ -210,7 +215,7 @@ public class BucketingSinkTest {
 
 		testHarness.processElement(new StreamRecord<>("test1", 1L));
 		testHarness.processElement(new StreamRecord<>("test2", 1L));
-		checkFs(outDir, 2, 0 ,0, 0);
+		checkFs(outDir, 2, 0 , 0, 0);
 
 		// this is to check the inactivity threshold
 		testHarness.setProcessingTime(101L);
@@ -758,13 +763,13 @@ public class BucketingSinkTest {
 			testHarness.processElement(new StreamRecord<>(Integer.toString(i % step1NumIds)));
 		}
 
-		testHarness.setProcessingTime(2*60*1000L);
+		testHarness.setProcessingTime(2 * 60 * 1000L);
 
 		for (int i = 0; i < numElementsPerStep; i++) {
 			testHarness.processElement(new StreamRecord<>(Integer.toString(i % step2NumIds)));
 		}
 
-		testHarness.setProcessingTime(6*60*1000L);
+		testHarness.setProcessingTime(6 * 60 * 1000L);
 
 		for (int i = 0; i < numElementsPerStep; i++) {
 			testHarness.processElement(new StreamRecord<>(Integer.toString(i % step2NumIds)));
@@ -791,7 +796,7 @@ public class BucketingSinkTest {
 	}
 
 	/**
-	 * This tests user defined hdfs configuration
+	 * This tests user defined hdfs configuration.
 	 * @throws Exception
 	 */
 	@Test
@@ -810,10 +815,10 @@ public class BucketingSinkTest {
 		Configuration conf = new Configuration();
 		conf.set("io.file.buffer.size", "40960");
 
-		BucketingSink<Tuple2<Integer,String>> sink = new BucketingSink<Tuple2<Integer, String>>(outPath)
+		BucketingSink<Tuple2<Integer, String>> sink = new BucketingSink<Tuple2<Integer, String>>(outPath)
 			.setFSConfig(conf)
 			.setWriter(new StreamWriterWithConfigCheck<Integer, String>(properties, "io.file.buffer.size", "40960"))
-			.setBucketer(new BasePathBucketer<Tuple2<Integer,String>>())
+			.setBucketer(new BasePathBucketer<Tuple2<Integer, String>>())
 			.setPartPrefix(PART_PREFIX)
 			.setPendingPrefix("")
 			.setPendingSuffix("");

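For orientation, the builder methods exercised by these tests compose into a sink configuration roughly like the sketch below; the prefixes, suffixes and intervals are made-up values, not recommended settings.

package org.apache.flink.streaming.connectors.fs.bucketing;

import org.apache.flink.streaming.connectors.fs.StringWriter;

/**
 * Illustrative BucketingSink configuration mirroring the setters used in the tests above.
 */
public class BucketingSinkConfigSketch {

	static BucketingSink<String> buildSink(String basePath) {
		return new BucketingSink<String>(basePath)
			.setWriter(new StringWriter<String>())
			.setBucketer(new BasePathBucketer<String>())
			.setPartPrefix("part")
			.setPendingPrefix("")
			.setPendingSuffix(".pending")
			.setInProgressSuffix(".in-progress")
			// close buckets that have been inactive for five minutes, checked once a minute
			.setInactiveBucketCheckInterval(60 * 1000L)
			.setInactiveBucketThreshold(5 * 60 * 1000L);
	}
}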
http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingSinkMigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingSinkMigrationTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingSinkMigrationTest.java
index 3355fae..75eb685 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingSinkMigrationTest.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingSinkMigrationTest.java
@@ -15,14 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.fs.RollingSink;
 import org.apache.flink.streaming.connectors.fs.StringWriter;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
+
+import org.apache.commons.io.FileUtils;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -34,6 +36,9 @@ import java.net.URL;
 import java.util.List;
 import java.util.Map;
 
+/**
+ * Tests the migration from 1.1 snapshots.
+ */
 @Deprecated
 public class RollingSinkMigrationTest {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingToBucketingMigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingToBucketingMigrationTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingToBucketingMigrationTest.java
index 257b157..ed4ab88 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingToBucketingMigrationTest.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/RollingToBucketingMigrationTest.java
@@ -15,14 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.fs.RollingSink;
 import org.apache.flink.streaming.connectors.fs.StringWriter;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
+
+import org.apache.commons.io.FileUtils;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -34,6 +36,9 @@ import java.net.URL;
 import java.util.List;
 import java.util.Map;
 
+/**
+ * Tests the migration from {@link RollingSink} to {@link BucketingSink}.
+ */
 public class RollingToBucketingMigrationTest {
 
 	@ClassRule

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/resources/log4j-test.properties b/flink-connectors/flink-connector-filesystem/src/test/resources/log4j-test.properties
index 5c22851..490767a 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-connector-filesystem/src/test/resources/log4j-test.properties
@@ -26,4 +26,4 @@ log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
 # suppress the irrelevant (wrong) warnings from the netty channel handler
 log4j.logger.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, testlogger
 
-log4j.logger.org.apache.directory=OFF, testlogger
\ No newline at end of file
+log4j.logger.org.apache.directory=OFF, testlogger

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-hadoop-compatibility/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/resources/log4j-test.properties b/flink-connectors/flink-hadoop-compatibility/src/test/resources/log4j-test.properties
index 0b686e5..881dc06 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/resources/log4j-test.properties
@@ -24,4 +24,4 @@ log4j.appender.A1=org.apache.log4j.ConsoleAppender
 
 # A1 uses PatternLayout.
 log4j.appender.A1.layout=org.apache.log4j.PatternLayout
-log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
\ No newline at end of file
+log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n


[20/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-avro

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-avro


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b58545ec
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b58545ec
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b58545ec

Branch: refs/heads/master
Commit: b58545ecde76ae88be11ebdc305adbd9b132d302
Parents: 1a3a5b6
Author: zentol <ch...@apache.org>
Authored: Fri May 26 00:15:34 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:35 2017 +0200

----------------------------------------------------------------------
 flink-connectors/flink-avro/pom.xml             |   2 +-
 .../apache/flink/api/avro/DataInputDecoder.java |  47 +++--
 .../flink/api/avro/DataOutputEncoder.java       |  47 +++--
 .../api/avro/FSDataInputStreamWrapper.java      |  13 +-
 .../flink/api/java/io/AvroInputFormat.java      |  40 ++---
 .../flink/api/java/io/AvroOutputFormat.java     |  30 ++--
 .../src/test/assembly/test-assembly.xml         |   4 +-
 .../api/avro/AvroExternalJarProgramITCase.java  |  14 +-
 .../flink/api/avro/AvroOutputFormatITCase.java  |  39 +++--
 .../flink/api/avro/EncoderDecoderTest.java      | 173 ++++++++++---------
 .../avro/testjar/AvroExternalJarProgram.java    | 132 +++++++-------
 .../apache/flink/api/io/avro/AvroPojoTest.java  |  24 +--
 .../api/io/avro/AvroRecordInputFormatTest.java  | 150 ++++++++--------
 .../io/avro/AvroSplittableInputFormatTest.java  | 133 +++++++-------
 .../api/io/avro/example/AvroTypeExample.java    |  46 +++--
 .../io/AvroInputFormatTypeExtractionTest.java   |   9 +-
 .../flink/api/java/io/AvroOutputFormatTest.java |  35 ++--
 .../src/test/resources/avro/user.avsc           |   4 +-
 18 files changed, 469 insertions(+), 473 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/pom.xml b/flink-connectors/flink-avro/pom.xml
index 5082924..d057177 100644
--- a/flink-connectors/flink-avro/pom.xml
+++ b/flink-connectors/flink-avro/pom.xml
@@ -19,7 +19,7 @@ under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 		xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-	
+
 	<modelVersion>4.0.0</modelVersion>
 
 	<parent>

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataInputDecoder.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataInputDecoder.java b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataInputDecoder.java
index 59da4cb..870d66f 100644
--- a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataInputDecoder.java
+++ b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataInputDecoder.java
@@ -18,20 +18,22 @@
 
 package org.apache.flink.api.avro;
 
-import java.io.DataInput;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
 import org.apache.avro.io.Decoder;
 import org.apache.avro.util.Utf8;
 
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
 
+/**
+ * A {@link Decoder} that reads from a {@link DataInput}.
+ */
 public class DataInputDecoder extends Decoder {
-	
+
 	private final Utf8 stringDecoder = new Utf8();
-	
+
 	private DataInput in;
-	
+
 	public void setIn(DataInput in) {
 		this.in = in;
 	}
@@ -39,10 +41,9 @@ public class DataInputDecoder extends Decoder {
 	// --------------------------------------------------------------------------------------------
 	// primitives
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public void readNull() {}
-	
 
 	@Override
 	public boolean readBoolean() throws IOException {
@@ -68,12 +69,12 @@ public class DataInputDecoder extends Decoder {
 	public double readDouble() throws IOException {
 		return in.readDouble();
 	}
-	
+
 	@Override
 	public int readEnum() throws IOException {
 		return readInt();
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// bytes
 	// --------------------------------------------------------------------------------------------
@@ -82,7 +83,7 @@ public class DataInputDecoder extends Decoder {
 	public void readFixed(byte[] bytes, int start, int length) throws IOException {
 		in.readFully(bytes, start, length);
 	}
-	
+
 	@Override
 	public ByteBuffer readBytes(ByteBuffer old) throws IOException {
 		int length = readInt();
@@ -97,34 +98,32 @@ public class DataInputDecoder extends Decoder {
 		result.limit(length);
 		return result;
 	}
-	
-	
+
 	@Override
 	public void skipFixed(int length) throws IOException {
 		skipBytes(length);
 	}
-	
+
 	@Override
 	public void skipBytes() throws IOException {
 		int num = readInt();
 		skipBytes(num);
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// strings
 	// --------------------------------------------------------------------------------------------
-	
-	
+
 	@Override
 	public Utf8 readString(Utf8 old) throws IOException {
 		int length = readInt();
 		Utf8 result = (old != null ? old : new Utf8());
 		result.setByteLength(length);
-		
+
 		if (length > 0) {
 			in.readFully(result.getBytes(), 0, length);
 		}
-		
+
 		return result;
 	}
 
@@ -172,7 +171,7 @@ public class DataInputDecoder extends Decoder {
 	public long skipMap() throws IOException {
 		return readVarLongCount(in);
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// union
 	// --------------------------------------------------------------------------------------------
@@ -181,17 +180,17 @@ public class DataInputDecoder extends Decoder {
 	public int readIndex() throws IOException {
 		return readInt();
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// utils
 	// --------------------------------------------------------------------------------------------
-	
+
 	private void skipBytes(int num) throws IOException {
 		while (num > 0) {
 			num -= in.skipBytes(num);
 		}
 	}
-	
+
 	public static long readVarLongCount(DataInput in) throws IOException {
 		long value = in.readUnsignedByte();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataOutputEncoder.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataOutputEncoder.java b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataOutputEncoder.java
index 0102cc1..beae330 100644
--- a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataOutputEncoder.java
+++ b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/DataOutputEncoder.java
@@ -18,36 +18,35 @@
 
 package org.apache.flink.api.avro;
 
-import java.io.DataOutput;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
 import org.apache.avro.io.Encoder;
 import org.apache.avro.util.Utf8;
 
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
 
+/**
+ * An {@link Encoder} that writes data to a {@link DataOutput}.
+ */
 public final class DataOutputEncoder extends Encoder implements java.io.Serializable {
-	
+
 	private static final long serialVersionUID = 1L;
-	
+
 	private DataOutput out;
-	
-	
+
 	public void setOut(DataOutput out) {
 		this.out = out;
 	}
 
-
 	@Override
 	public void flush() throws IOException {}
 
 	// --------------------------------------------------------------------------------------------
 	// primitives
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public void writeNull() {}
-	
 
 	@Override
 	public void writeBoolean(boolean b) throws IOException {
@@ -73,13 +72,12 @@ public final class DataOutputEncoder extends Encoder implements java.io.Serializ
 	public void writeDouble(double d) throws IOException {
 		out.writeDouble(d);
 	}
-	
+
 	@Override
 	public void writeEnum(int e) throws IOException {
 		out.writeInt(e);
 	}
-	
-	
+
 	// --------------------------------------------------------------------------------------------
 	// bytes
 	// --------------------------------------------------------------------------------------------
@@ -88,7 +86,7 @@ public final class DataOutputEncoder extends Encoder implements java.io.Serializ
 	public void writeFixed(byte[] bytes, int start, int len) throws IOException {
 		out.write(bytes, start, len);
 	}
-	
+
 	@Override
 	public void writeBytes(byte[] bytes, int start, int len) throws IOException {
 		out.writeInt(len);
@@ -96,17 +94,17 @@ public final class DataOutputEncoder extends Encoder implements java.io.Serializ
 			out.write(bytes, start, len);
 		}
 	}
-	
+
 	@Override
 	public void writeBytes(ByteBuffer bytes) throws IOException {
 		int num = bytes.remaining();
 		out.writeInt(num);
-		
+
 		if (num > 0) {
 			writeFixed(bytes);
 		}
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// strings
 	// --------------------------------------------------------------------------------------------
@@ -116,11 +114,11 @@ public final class DataOutputEncoder extends Encoder implements java.io.Serializ
 		byte[] bytes = Utf8.getBytesFor(str);
 		writeBytes(bytes, 0, bytes.length);
 	}
-	
+
 	@Override
 	public void writeString(Utf8 utf8) throws IOException {
 		writeBytes(utf8.getBytes(), 0, utf8.getByteLength());
-		
+
 	}
 
 	// --------------------------------------------------------------------------------------------
@@ -158,22 +156,21 @@ public final class DataOutputEncoder extends Encoder implements java.io.Serializ
 	// --------------------------------------------------------------------------------------------
 	// union
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public void writeIndex(int unionIndex) throws IOException {
 		out.writeInt(unionIndex);
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// utils
 	// --------------------------------------------------------------------------------------------
-		
-	
+
 	public static void writeVarLongCount(DataOutput out, long val) throws IOException {
 		if (val < 0) {
 			throw new IOException("Illegal count (must be non-negative): " + val);
 		}
-		
+
 		while ((val & ~0x7FL) != 0) {
 			out.write(((int) val) | 0x80);
 			val >>>= 7;

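For reference, a minimal sketch of how this encoder is driven in practice, following the same pattern the EncoderDecoderTest later in this mail uses: an Avro DatumWriter is handed the encoder and calls back into the write primitives shown in the diff above. The helper name encodeWithDataOutput is illustrative, not part of the Flink API:

	import org.apache.flink.api.avro.DataOutputEncoder;

	import org.apache.avro.reflect.ReflectDatumWriter;

	import java.io.ByteArrayOutputStream;
	import java.io.DataOutputStream;
	import java.io.IOException;

	// Illustrative helper: Avro's reflect writer drives the writeX(...) primitives
	// of DataOutputEncoder, which forwards them to the DataOutput set via setOut().
	public static <T> byte[] encodeWithDataOutput(T value, Class<T> clazz) throws IOException {
		ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
		DataOutputStream dataOut = new DataOutputStream(baos);

		DataOutputEncoder encoder = new DataOutputEncoder();
		encoder.setOut(dataOut);

		new ReflectDatumWriter<T>(clazz).write(value, encoder);
		dataOut.flush();
		return baos.toByteArray();
	}

The writeVarLongCount utility at the end writes seven bits per byte, least-significant group first, with the high bit as a continuation flag: a count of 300, for example, becomes the two bytes 0xAC 0x02 (the final write of the last byte falls outside the lines quoted above).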
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/FSDataInputStreamWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/FSDataInputStreamWrapper.java b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/FSDataInputStreamWrapper.java
index 709c4f1..19e4a89 100644
--- a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/FSDataInputStreamWrapper.java
+++ b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/avro/FSDataInputStreamWrapper.java
@@ -16,20 +16,19 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.api.avro;
 
-import java.io.Closeable;
-import java.io.IOException;
+import org.apache.flink.core.fs.FSDataInputStream;
 
 import org.apache.avro.file.SeekableInput;
-import org.apache.flink.core.fs.FSDataInputStream;
 
+import java.io.Closeable;
+import java.io.IOException;
 
 /**
- * Code copy pasted from org.apache.avro.mapred.FSInput (which is Apache licensed as well)
- * 
- * The wrapper keeps track of the position in the data stream.
+ * Code copy pasted from org.apache.avro.mapred.FSInput (which is Apache licensed as well).
+ *
+ * <p>The wrapper keeps track of the position in the data stream.
  */
 public class FSDataInputStreamWrapper implements Closeable, SeekableInput {
 	private final FSDataInputStream stream;

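A rough sketch of what the wrapper is for: adapting Flink's FSDataInputStream to Avro's SeekableInput so a DataFileReader can seek and tell on it. The two-argument (stream, length) constructor and the helper method are assumptions for the sketch, not verbatim API from this diff:

	import org.apache.flink.api.avro.FSDataInputStreamWrapper;
	import org.apache.flink.core.fs.FSDataInputStream;
	import org.apache.flink.core.fs.FileSystem;
	import org.apache.flink.core.fs.Path;

	import org.apache.avro.file.DataFileReader;
	import org.apache.avro.file.SeekableInput;
	import org.apache.avro.generic.GenericDatumReader;
	import org.apache.avro.generic.GenericRecord;

	import java.io.IOException;

	// Hypothetical helper: open an Avro container file that lives on a Flink FileSystem.
	static DataFileReader<GenericRecord> openAvroFile(Path path) throws IOException {
		FileSystem fs = path.getFileSystem();
		FSDataInputStream stream = fs.open(path);
		long length = fs.getFileStatus(path).getLen();

		// The wrapper keeps track of the read position so Avro can seek on the stream.
		SeekableInput seekable = new FSDataInputStreamWrapper(stream, length);
		return new DataFileReader<GenericRecord>(seekable, new GenericDatumReader<GenericRecord>());
	}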
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroInputFormat.java b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroInputFormat.java
index 73067c1..33105cc 100644
--- a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroInputFormat.java
+++ b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroInputFormat.java
@@ -16,10 +16,19 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.api.java.io;
 
-import java.io.IOException;
+import org.apache.flink.api.avro.FSDataInputStreamWrapper;
+import org.apache.flink.api.common.io.CheckpointableInputFormat;
+import org.apache.flink.api.common.io.FileInputFormat;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
+import org.apache.flink.api.java.typeutils.TypeExtractor;
+import org.apache.flink.core.fs.FileInputSplit;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.util.InstantiationUtil;
+import org.apache.flink.util.Preconditions;
 
 import org.apache.avro.file.DataFileReader;
 import org.apache.avro.file.SeekableInput;
@@ -28,19 +37,10 @@ import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.io.DatumReader;
 import org.apache.avro.reflect.ReflectDatumReader;
 import org.apache.avro.specific.SpecificDatumReader;
-import org.apache.flink.api.common.io.CheckpointableInputFormat;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.flink.api.avro.FSDataInputStreamWrapper;
-import org.apache.flink.api.common.io.FileInputFormat;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
-import org.apache.flink.api.java.typeutils.TypeExtractor;
-import org.apache.flink.core.fs.FileInputSplit;
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.util.InstantiationUtil;
+
+import java.io.IOException;
 
 /**
  * Provides a {@link FileInputFormat} for Avro records.
@@ -59,7 +59,7 @@ public class AvroInputFormat<E> extends FileInputFormat<E> implements ResultType
 	private static final Logger LOG = LoggerFactory.getLogger(AvroInputFormat.class);
 
 	private final Class<E> avroValueType;
-	
+
 	private boolean reuseAvroValue = true;
 
 	private transient DataFileReader<E> dataFileReader;
@@ -68,7 +68,7 @@ public class AvroInputFormat<E> extends FileInputFormat<E> implements ResultType
 
 	private transient long recordsReadSinceLastSync;
 
-	private long lastSync = -1l;
+	private long lastSync = -1L;
 
 	public AvroInputFormat(Path filePath, Class<E> type) {
 		super(filePath);
@@ -91,16 +91,16 @@ public class AvroInputFormat<E> extends FileInputFormat<E> implements ResultType
 	public void setUnsplittable(boolean unsplittable) {
 		this.unsplittable = unsplittable;
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// Typing
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public TypeInformation<E> getProducedType() {
 		return TypeExtractor.getForClass(this.avroValueType);
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// Input Format Methods
 	// --------------------------------------------------------------------------------------------
@@ -155,7 +155,7 @@ public class AvroInputFormat<E> extends FileInputFormat<E> implements ResultType
 
 		// if we start a new block, then register the event, and
 		// restart the counter.
-		if(dataFileReader.previousSync() != lastSync) {
+		if (dataFileReader.previousSync() != lastSync) {
 			lastSync = dataFileReader.previousSync();
 			recordsReadSinceLastSync = 0;
 		}
@@ -199,7 +199,7 @@ public class AvroInputFormat<E> extends FileInputFormat<E> implements ResultType
 			// open and read until the record we were before
 			// the checkpoint and discard the values
 			dataFileReader.seek(lastSync);
-			for(int i = 0; i < recordsReadSinceLastSync; i++) {
+			for (int i = 0; i < recordsReadSinceLastSync; i++) {
 				dataFileReader.next(null);
 			}
 		}

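For context, the typical way this input format is wired into a batch program, mirroring the call the AvroExternalJarProgram later in this mail makes. The class name ReadAvroExample and the file path are made up; User is the generated class from the flink-avro test schema:

	import org.apache.flink.api.io.avro.example.User;
	import org.apache.flink.api.java.DataSet;
	import org.apache.flink.api.java.ExecutionEnvironment;
	import org.apache.flink.api.java.io.AvroInputFormat;
	import org.apache.flink.core.fs.Path;

	public class ReadAvroExample {
		public static void main(String[] args) throws Exception {
			ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

			AvroInputFormat<User> format =
					new AvroInputFormat<User>(new Path("file:///tmp/users.avro"), User.class);
			format.setReuseAvroValue(false);   // optional: return a fresh object per record
			format.setUnsplittable(true);      // optional: read each file in a single split

			DataSet<User> users = env.createInput(format);
			users.print();                     // triggers execution of the batch job
		}
	}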
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroOutputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroOutputFormat.java b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroOutputFormat.java
index ae90362..aed40bf 100644
--- a/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroOutputFormat.java
+++ b/flink-connectors/flink-avro/src/main/java/org/apache/flink/api/java/io/AvroOutputFormat.java
@@ -15,8 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.api.java.io;
 
+import org.apache.flink.api.common.io.FileOutputFormat;
+import org.apache.flink.configuration.ConfigConstants;
+import org.apache.flink.core.fs.Path;
+
 import org.apache.avro.Schema;
 import org.apache.avro.file.CodecFactory;
 import org.apache.avro.file.DataFileWriter;
@@ -24,15 +29,16 @@ import org.apache.avro.io.DatumWriter;
 import org.apache.avro.reflect.ReflectData;
 import org.apache.avro.reflect.ReflectDatumWriter;
 import org.apache.avro.specific.SpecificDatumWriter;
-import org.apache.flink.api.common.io.FileOutputFormat;
-import org.apache.flink.configuration.ConfigConstants;
-import org.apache.flink.core.fs.Path;
 
 import java.io.IOException;
 import java.io.Serializable;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
+/**
+ * {@link FileOutputFormat} for Avro records.
+ * @param <E>
+ */
 public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializable {
 
 	/**
@@ -40,11 +46,11 @@ public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializ
 	 */
 	public enum Codec {
 
-		NULL((byte)0, CodecFactory.nullCodec()),
-		SNAPPY((byte)1, CodecFactory.snappyCodec()),
-		BZIP2((byte)2, CodecFactory.bzip2Codec()),
-		DEFLATE((byte)3, CodecFactory.deflateCodec(CodecFactory.DEFAULT_DEFLATE_LEVEL)),
-		XZ((byte)4, CodecFactory.xzCodec(CodecFactory.DEFAULT_XZ_LEVEL));
+		NULL((byte) 0, CodecFactory.nullCodec()),
+		SNAPPY((byte) 1, CodecFactory.snappyCodec()),
+		BZIP2((byte) 2, CodecFactory.bzip2Codec()),
+		DEFLATE((byte) 3, CodecFactory.deflateCodec(CodecFactory.DEFAULT_DEFLATE_LEVEL)),
+		XZ((byte) 4, CodecFactory.xzCodec(CodecFactory.DEFAULT_XZ_LEVEL));
 
 		private byte codecByte;
 
@@ -80,7 +86,7 @@ public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializ
 	private transient Schema userDefinedSchema = null;
 
 	private transient Codec codec = null;
-	
+
 	private transient DataFileWriter<E> dataFileWriter;
 
 	public AvroOutputFormat(Path filePath, Class<E> type) {
@@ -124,7 +130,7 @@ public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializ
 		if (org.apache.avro.specific.SpecificRecordBase.class.isAssignableFrom(avroValueType)) {
 			datumWriter = new SpecificDatumWriter<E>(avroValueType);
 			try {
-				schema = ((org.apache.avro.specific.SpecificRecordBase)avroValueType.newInstance()).getSchema();
+				schema = ((org.apache.avro.specific.SpecificRecordBase) avroValueType.newInstance()).getSchema();
 			} catch (InstantiationException | IllegalAccessException e) {
 				throw new RuntimeException(e.getMessage());
 			}
@@ -152,7 +158,7 @@ public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializ
 			out.writeByte(-1);
 		}
 
-		if(userDefinedSchema != null) {
+		if (userDefinedSchema != null) {
 			byte[] json = userDefinedSchema.toString().getBytes(ConfigConstants.DEFAULT_CHARSET);
 			out.writeInt(json.length);
 			out.write(json);
@@ -170,7 +176,7 @@ public class AvroOutputFormat<E> extends FileOutputFormat<E> implements Serializ
 		}
 
 		int length = in.readInt();
-		if(length != 0) {
+		if (length != 0) {
 			byte[] json = new byte[length];
 			in.readFully(json);
 

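And the matching write side, picking up the DataSet<User> users and env from the read sketch above (plus an import of org.apache.flink.api.java.io.AvroOutputFormat). The setCodec setter is assumed to accompany the Codec enum in this diff; the output path and job name are made up:

	AvroOutputFormat<User> outputFormat =
			new AvroOutputFormat<User>(new Path("file:///tmp/users-out.avro"), User.class);
	// Assumed setter for the codec byte that the serialization code above writes out.
	outputFormat.setCodec(AvroOutputFormat.Codec.SNAPPY);

	users.output(outputFormat);
	env.execute("write avro users");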
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/assembly/test-assembly.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/assembly/test-assembly.xml b/flink-connectors/flink-avro/src/test/assembly/test-assembly.xml
index 0f4561a..0cbdbe1 100644
--- a/flink-connectors/flink-avro/src/test/assembly/test-assembly.xml
+++ b/flink-connectors/flink-avro/src/test/assembly/test-assembly.xml
@@ -26,11 +26,11 @@ under the License.
 	<fileSets>
 		<fileSet>
 			<directory>${project.build.testOutputDirectory}</directory>
-			<outputDirectory>/</outputDirectory>
+			<outputDirectory></outputDirectory>
 			<!--modify/add include to match your package(s) -->
 			<includes>
 				<include>org/apache/flink/api/avro/testjar/**</include>
 			</includes>
 		</fileSet>
 	</fileSets>
-</assembly>
\ No newline at end of file
+</assembly>

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroExternalJarProgramITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroExternalJarProgramITCase.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroExternalJarProgramITCase.java
index 063a363..7bcba04 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroExternalJarProgramITCase.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroExternalJarProgramITCase.java
@@ -18,21 +18,25 @@
 
 package org.apache.flink.api.avro;
 
-import java.io.File;
-import java.net.URL;
-import java.util.Collections;
-
+import org.apache.flink.api.avro.testjar.AvroExternalJarProgram;
 import org.apache.flink.client.program.PackagedProgram;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.fs.Path;
 import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster;
-
 import org.apache.flink.test.util.TestEnvironment;
 import org.apache.flink.util.TestLogger;
+
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.File;
+import java.net.URL;
+import java.util.Collections;
+
+/**
+ * IT case for the {@link AvroExternalJarProgram}.
+ */
 public class AvroExternalJarProgramITCase extends TestLogger {
 
 	private static final String JAR_FILE = "maven-test-jar.jar";

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroOutputFormatITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroOutputFormatITCase.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroOutputFormatITCase.java
index 3b01ccb..f630f41 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroOutputFormatITCase.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/AvroOutputFormatITCase.java
@@ -18,24 +18,27 @@
 
 package org.apache.flink.api.avro;
 
-import org.junit.Assert;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.avro.file.DataFileReader;
-import org.apache.avro.io.DatumReader;
-import org.apache.avro.reflect.ReflectDatumReader;
-import org.apache.avro.specific.SpecificDatumReader;
+import org.apache.flink.api.common.functions.RichMapFunction;
 import org.apache.flink.api.io.avro.example.User;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.common.functions.RichMapFunction;
 import org.apache.flink.api.java.io.AvroOutputFormat;
 import org.apache.flink.api.java.tuple.Tuple3;
 import org.apache.flink.test.util.JavaProgramTestBase;
 
+import org.apache.avro.file.DataFileReader;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.reflect.ReflectDatumReader;
+import org.apache.avro.specific.SpecificDatumReader;
+import org.junit.Assert;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * IT cases for the {@link AvroOutputFormat}.
+ */
 @SuppressWarnings("serial")
 public class AvroOutputFormatITCase extends JavaProgramTestBase {
 
@@ -57,7 +60,6 @@ public class AvroOutputFormatITCase extends JavaProgramTestBase {
 		outputPath2 = getTempDirPath("avro_output2");
 	}
 
-
 	@Override
 	protected void testProgram() throws Exception {
 		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
@@ -129,11 +131,9 @@ public class AvroOutputFormatITCase extends JavaProgramTestBase {
 			Assert.assertTrue("expected user " + expectedResult + " not found.", result2.contains(expectedResult));
 		}
 
-
 	}
 
-
-	public final static class ConvertToUser extends RichMapFunction<Tuple3<String, Integer, String>, User> {
+	private static final class ConvertToUser extends RichMapFunction<Tuple3<String, Integer, String>, User> {
 
 		@Override
 		public User map(Tuple3<String, Integer, String> value) throws Exception {
@@ -141,7 +141,7 @@ public class AvroOutputFormatITCase extends JavaProgramTestBase {
 		}
 	}
 
-	public final static class ConvertToReflective extends RichMapFunction<User, ReflectiveUser> {
+	private static final class ConvertToReflective extends RichMapFunction<User, ReflectiveUser> {
 
 		@Override
 		public ReflectiveUser map(User value) throws Exception {
@@ -149,8 +149,7 @@ public class AvroOutputFormatITCase extends JavaProgramTestBase {
 		}
 	}
 
-	
-	public static class ReflectiveUser {
+	private static class ReflectiveUser {
 		private String name;
 		private int favoriteNumber;
 		private String favoriteColor;
@@ -162,13 +161,15 @@ public class AvroOutputFormatITCase extends JavaProgramTestBase {
 			this.favoriteNumber = favoriteNumber;
 			this.favoriteColor = favoriteColor;
 		}
-		
+
 		public String getName() {
 			return this.name;
 		}
+
 		public String getFavoriteColor() {
 			return this.favoriteColor;
 		}
+
 		public int getFavoriteNumber() {
 			return this.favoriteNumber;
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/EncoderDecoderTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/EncoderDecoderTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/EncoderDecoderTest.java
index c39db15..808c257 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/EncoderDecoderTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/EncoderDecoderTest.java
@@ -18,6 +18,16 @@
 
 package org.apache.flink.api.avro;
 
+import org.apache.flink.api.io.avro.generated.Address;
+import org.apache.flink.api.io.avro.generated.Colors;
+import org.apache.flink.api.io.avro.generated.Fixed16;
+import org.apache.flink.api.io.avro.generated.User;
+import org.apache.flink.util.StringUtils;
+
+import org.apache.avro.reflect.ReflectDatumReader;
+import org.apache.avro.reflect.ReflectDatumWriter;
+import org.junit.Test;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,16 +39,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.avro.reflect.ReflectDatumReader;
-import org.apache.avro.reflect.ReflectDatumWriter;
-import org.apache.flink.api.io.avro.generated.Address;
-import org.apache.flink.api.io.avro.generated.Colors;
-import org.apache.flink.api.io.avro.generated.Fixed16;
-import org.apache.flink.api.io.avro.generated.User;
-import org.apache.flink.util.StringUtils;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * Tests the {@link DataOutputEncoder} and {@link DataInputDecoder} classes for Avro serialization.
@@ -48,32 +51,32 @@ public class EncoderDecoderTest {
 	public void testComplexStringsDirecty() {
 		try {
 			Random rnd = new Random(349712539451944123L);
-			
+
 			for (int i = 0; i < 10; i++) {
 				String testString = StringUtils.getRandomString(rnd, 10, 100);
-				
+
 				ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
 				{
 					DataOutputStream dataOut = new DataOutputStream(baos);
 					DataOutputEncoder encoder = new DataOutputEncoder();
 					encoder.setOut(dataOut);
-					
+
 					encoder.writeString(testString);
 					dataOut.flush();
 					dataOut.close();
 				}
-				
+
 				byte[] data = baos.toByteArray();
-				
+
 				// deserialize
 				{
 					ByteArrayInputStream bais = new ByteArrayInputStream(data);
 					DataInputStream dataIn = new DataInputStream(bais);
 					DataInputDecoder decoder = new DataInputDecoder();
 					decoder.setIn(dataIn);
-	
+
 					String deserialized = decoder.readString();
-					
+
 					assertEquals(testString, deserialized);
 				}
 			}
@@ -84,49 +87,49 @@ public class EncoderDecoderTest {
 			fail("Test failed due to an exception: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testPrimitiveTypes() {
-		
+
 		testObjectSerialization(new Boolean(true));
 		testObjectSerialization(new Boolean(false));
-		
+
 		testObjectSerialization(Byte.valueOf((byte) 0));
 		testObjectSerialization(Byte.valueOf((byte) 1));
 		testObjectSerialization(Byte.valueOf((byte) -1));
 		testObjectSerialization(Byte.valueOf(Byte.MIN_VALUE));
 		testObjectSerialization(Byte.valueOf(Byte.MAX_VALUE));
-		
+
 		testObjectSerialization(Short.valueOf((short) 0));
 		testObjectSerialization(Short.valueOf((short) 1));
 		testObjectSerialization(Short.valueOf((short) -1));
 		testObjectSerialization(Short.valueOf(Short.MIN_VALUE));
 		testObjectSerialization(Short.valueOf(Short.MAX_VALUE));
-		
+
 		testObjectSerialization(Integer.valueOf(0));
 		testObjectSerialization(Integer.valueOf(1));
 		testObjectSerialization(Integer.valueOf(-1));
 		testObjectSerialization(Integer.valueOf(Integer.MIN_VALUE));
 		testObjectSerialization(Integer.valueOf(Integer.MAX_VALUE));
-		
+
 		testObjectSerialization(Long.valueOf(0));
 		testObjectSerialization(Long.valueOf(1));
 		testObjectSerialization(Long.valueOf(-1));
 		testObjectSerialization(Long.valueOf(Long.MIN_VALUE));
 		testObjectSerialization(Long.valueOf(Long.MAX_VALUE));
-		
+
 		testObjectSerialization(Float.valueOf(0));
 		testObjectSerialization(Float.valueOf(1));
 		testObjectSerialization(Float.valueOf(-1));
-		testObjectSerialization(Float.valueOf((float)Math.E));
-		testObjectSerialization(Float.valueOf((float)Math.PI));
+		testObjectSerialization(Float.valueOf((float) Math.E));
+		testObjectSerialization(Float.valueOf((float) Math.PI));
 		testObjectSerialization(Float.valueOf(Float.MIN_VALUE));
 		testObjectSerialization(Float.valueOf(Float.MAX_VALUE));
 		testObjectSerialization(Float.valueOf(Float.MIN_NORMAL));
 		testObjectSerialization(Float.valueOf(Float.NaN));
 		testObjectSerialization(Float.valueOf(Float.NEGATIVE_INFINITY));
 		testObjectSerialization(Float.valueOf(Float.POSITIVE_INFINITY));
-		
+
 		testObjectSerialization(Double.valueOf(0));
 		testObjectSerialization(Double.valueOf(1));
 		testObjectSerialization(Double.valueOf(-1));
@@ -138,15 +141,15 @@ public class EncoderDecoderTest {
 		testObjectSerialization(Double.valueOf(Double.NaN));
 		testObjectSerialization(Double.valueOf(Double.NEGATIVE_INFINITY));
 		testObjectSerialization(Double.valueOf(Double.POSITIVE_INFINITY));
-		
+
 		testObjectSerialization("");
 		testObjectSerialization("abcdefg");
 		testObjectSerialization("ab\u1535\u0155xyz\u706F");
-		
+
 		testObjectSerialization(new SimpleTypes(3637, 54876486548L, (byte) 65, "We're out looking for astronauts", (short) 0x2387, 2.65767523));
 		testObjectSerialization(new SimpleTypes(705608724, -1L, (byte) -65, "Serve me the sky with a big slice of lemon", (short) Byte.MIN_VALUE, 0.0000001));
 	}
-	
+
 	@Test
 	public void testArrayTypes() {
 		{
@@ -170,7 +173,7 @@ public class EncoderDecoderTest {
 			testObjectSerialization(array);
 		}
 	}
-	
+
 	@Test
 	public void testEmptyArray() {
 		{
@@ -194,14 +197,14 @@ public class EncoderDecoderTest {
 			testObjectSerialization(array);
 		}
 	}
-	
+
 	@Test
 	public void testObjects() {
 		// simple object containing only primitives
 		{
 			testObjectSerialization(new Book(976243875L, "The Serialization Odysse", 42));
 		}
-		
+
 		// object with collection
 		{
 			ArrayList<String> list = new ArrayList<String>();
@@ -210,22 +213,22 @@ public class EncoderDecoderTest {
 			list.add("C");
 			list.add("D");
 			list.add("E");
-			
+
 			testObjectSerialization(new BookAuthor(976243875L, list, "Arno Nym"));
 		}
-		
+
 		// object with empty collection
 		{
 			ArrayList<String> list = new ArrayList<String>();
 			testObjectSerialization(new BookAuthor(987654321L, list, "The Saurus"));
 		}
 	}
-	
+
 	@Test
 	public void testNestedObjectsWithCollections() {
 		testObjectSerialization(new ComplexNestedObject2(true));
 	}
-	
+
 	@Test
 	public void testGeneratedObjectWithNullableFields() {
 		List<CharSequence> strings = Arrays.asList(new CharSequence[] { "These", "strings", "should", "be", "recognizable", "as", "a", "meaningful", "sequence" });
@@ -243,33 +246,33 @@ public class EncoderDecoderTest {
 		User user = new User("Freudenreich", 1337, "macintosh gray",
 				1234567890L, 3.1415926, null, true, strings, bools, null,
 				Colors.GREEN, map, f, new Boolean(true), addr);
-		
+
 		testObjectSerialization(user);
 	}
-	
+
 	@Test
 	public void testVarLenCountEncoding() {
 		try {
 			long[] values = new long[] { 0, 1, 2, 3, 4, 0, 574, 45236, 0, 234623462, 23462462346L, 0, 9734028767869761L, 0x7fffffffffffffffL};
-			
+
 			// write
 			ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
 			{
 				DataOutputStream dataOut = new DataOutputStream(baos);
-				
+
 				for (long val : values) {
 					DataOutputEncoder.writeVarLongCount(dataOut, val);
 				}
-				
+
 				dataOut.flush();
 				dataOut.close();
 			}
-			
+
 			// read
 			{
 				ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
 				DataInputStream dataIn = new DataInputStream(bais);
-				
+
 				for (long val : values) {
 					long read = DataInputDecoder.readVarLongCount(dataIn);
 					assertEquals("Wrong var-len encoded value read.", val, read);
@@ -282,30 +285,30 @@ public class EncoderDecoderTest {
 			fail("Test failed due to an exception: " + e.getMessage());
 		}
 	}
-	
+
 	private static <X> void testObjectSerialization(X obj) {
-		
+
 		try {
-			
+
 			// serialize
 			ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
 			{
 				DataOutputStream dataOut = new DataOutputStream(baos);
 				DataOutputEncoder encoder = new DataOutputEncoder();
 				encoder.setOut(dataOut);
-				
+
 				@SuppressWarnings("unchecked")
 				Class<X> clazz = (Class<X>) obj.getClass();
 				ReflectDatumWriter<X> writer = new ReflectDatumWriter<X>(clazz);
-				
+
 				writer.write(obj, encoder);
 				dataOut.flush();
 				dataOut.close();
 			}
-			
+
 			byte[] data = baos.toByteArray();
 			X result = null;
-			
+
 			// deserialize
 			{
 				ByteArrayInputStream bais = new ByteArrayInputStream(data);
@@ -316,21 +319,21 @@ public class EncoderDecoderTest {
 				@SuppressWarnings("unchecked")
 				Class<X> clazz = (Class<X>) obj.getClass();
 				ReflectDatumReader<X> reader = new ReflectDatumReader<X>(clazz);
-				
-				// create a reuse object if possible, otherwise we have no reuse object 
+
+				// create a reuse object if possible, otherwise we have no reuse object
 				X reuse = null;
 				try {
 					@SuppressWarnings("unchecked")
 					X test = (X) obj.getClass().newInstance();
 					reuse = test;
 				} catch (Throwable t) {}
-				
+
 				result = reader.read(reuse, decoder);
 			}
-			
+
 			// check
 			final String message = "Deserialized object is not the same as the original";
-			
+
 			if (obj.getClass().isArray()) {
 				Class<?> clazz = obj.getClass();
 				if (clazz == byte[].class) {
@@ -366,26 +369,24 @@ public class EncoderDecoderTest {
 			fail("Test failed due to an exception: " + e.getMessage());
 		}
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	//  Test Objects
 	// --------------------------------------------------------------------------------------------
 
+	private static final class SimpleTypes {
 
-	public static final class SimpleTypes {
-		
 		private final int iVal;
 		private final long lVal;
 		private final byte bVal;
 		private final String sVal;
 		private final short rVal;
 		private final double dVal;
-		
-		
+
 		public SimpleTypes() {
 			this(0, 0, (byte) 0, "", (short) 0, 0);
 		}
-		
+
 		public SimpleTypes(int iVal, long lVal, byte bVal, String sVal, short rVal, double dVal) {
 			this.iVal = iVal;
 			this.lVal = lVal;
@@ -394,36 +395,36 @@ public class EncoderDecoderTest {
 			this.rVal = rVal;
 			this.dVal = dVal;
 		}
-		
+
 		@Override
 		public boolean equals(Object obj) {
 			if (obj.getClass() == SimpleTypes.class) {
 				SimpleTypes other = (SimpleTypes) obj;
-				
+
 				return other.iVal == this.iVal &&
 						other.lVal == this.lVal &&
 						other.bVal == this.bVal &&
 						other.sVal.equals(this.sVal) &&
 						other.rVal == this.rVal &&
 						other.dVal == this.dVal;
-				
+
 			} else {
 				return false;
 			}
 		}
 	}
-	
-	public static class ComplexNestedObject1 {
-		
+
+	private static class ComplexNestedObject1 {
+
 		private double doubleValue;
-		
+
 		private List<String> stringList;
-		
+
 		public ComplexNestedObject1() {}
-		
+
 		public ComplexNestedObject1(int offInit) {
 			this.doubleValue = 6293485.6723 + offInit;
-				
+
 			this.stringList = new ArrayList<String>();
 			this.stringList.add("A" + offInit);
 			this.stringList.add("somewhat" + offInit);
@@ -432,7 +433,7 @@ public class EncoderDecoderTest {
 			this.stringList.add("of" + offInit);
 			this.stringList.add("strings" + offInit);
 		}
-		
+
 		@Override
 		public boolean equals(Object obj) {
 			if (obj.getClass() == ComplexNestedObject1.class) {
@@ -443,18 +444,18 @@ public class EncoderDecoderTest {
 			}
 		}
 	}
-	
-	public static class ComplexNestedObject2 {
-		
+
+	private static class ComplexNestedObject2 {
+
 		private long longValue;
-		
+
 		private Map<String, ComplexNestedObject1> theMap;
-		
+
 		public ComplexNestedObject2() {}
-		
+
 		public ComplexNestedObject2(boolean init) {
 			this.longValue = 46547;
-				
+
 			this.theMap = new HashMap<String, ComplexNestedObject1>();
 			this.theMap.put("36354L", new ComplexNestedObject1(43546543));
 			this.theMap.put("785611L", new ComplexNestedObject1(45784568));
@@ -463,7 +464,7 @@ public class EncoderDecoderTest {
 			this.theMap.put("1919876876896L", new ComplexNestedObject1(27154));
 			this.theMap.put("-868468468L", new ComplexNestedObject1(546435));
 		}
-		
+
 		@Override
 		public boolean equals(Object obj) {
 			if (obj.getClass() == ComplexNestedObject2.class) {
@@ -474,8 +475,8 @@ public class EncoderDecoderTest {
 			}
 		}
 	}
-	
-	public static class Book {
+
+	private static class Book {
 
 		private long bookId;
 		private String title;
@@ -488,7 +489,7 @@ public class EncoderDecoderTest {
 			this.title = title;
 			this.authorId = authorId;
 		}
-		
+
 		@Override
 		public boolean equals(Object obj) {
 			if (obj.getClass() == Book.class) {
@@ -500,7 +501,7 @@ public class EncoderDecoderTest {
 		}
 	}
 
-	public static class BookAuthor {
+	private static class BookAuthor {
 
 		private long authorId;
 		private List<String> bookTitles;
@@ -513,7 +514,7 @@ public class EncoderDecoderTest {
 			this.bookTitles = bookTitles;
 			this.authorName = authorName;
 		}
-		
+
 		@Override
 		public boolean equals(Object obj) {
 			if (obj.getClass() == BookAuthor.class) {

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/testjar/AvroExternalJarProgram.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/testjar/AvroExternalJarProgram.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/testjar/AvroExternalJarProgram.java
index 1174786..a8541b6 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/testjar/AvroExternalJarProgram.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/avro/testjar/AvroExternalJarProgram.java
@@ -16,18 +16,21 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.api.avro.testjar;
 
-// ================================================================================================
-//  This file defines the classes for the AvroExternalJarProgramITCase.
-//  The program is exported into src/test/resources/AvroTestProgram.jar.
-//
-//  THIS FILE MUST STAY FULLY COMMENTED SUCH THAT THE HERE DEFINED CLASSES ARE NOT COMPILED
-//  AND ADDED TO THE test-classes DIRECTORY. OTHERWISE, THE EXTERNAL CLASS LOADING WILL
-//  NOT BE COVERED BY THIS TEST.
-// ================================================================================================
+import org.apache.flink.api.common.functions.RichMapFunction;
+import org.apache.flink.api.common.functions.RichReduceFunction;
+import org.apache.flink.api.java.DataSet;
+import org.apache.flink.api.java.ExecutionEnvironment;
+import org.apache.flink.api.java.io.AvroInputFormat;
+import org.apache.flink.api.java.io.DiscardingOutputFormat;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.core.fs.Path;
 
+import org.apache.avro.file.DataFileWriter;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.reflect.ReflectData;
+import org.apache.avro.reflect.ReflectDatumWriter;
 
 import java.io.File;
 import java.io.IOException;
@@ -35,100 +38,90 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.avro.file.DataFileWriter;
-import org.apache.avro.io.DatumWriter;
-import org.apache.avro.reflect.ReflectData;
-import org.apache.avro.reflect.ReflectDatumWriter;
-import org.apache.flink.api.common.functions.RichMapFunction;
-import org.apache.flink.api.common.functions.RichReduceFunction;
-import org.apache.flink.api.java.io.DiscardingOutputFormat;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.DataSet;
-import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.java.io.AvroInputFormat;
-import org.apache.flink.core.fs.Path;
-
+/**
+ * This file defines the classes for the AvroExternalJarProgramITCase.
+ */
 public class AvroExternalJarProgram  {
 
-	public static final class Color {
-		
+	private static final class Color {
+
 		private String name;
 		private double saturation;
-		
+
 		public Color() {
 			name = "";
 			saturation = 1.0;
 		}
-		
+
 		public Color(String name, double saturation) {
 			this.name = name;
 			this.saturation = saturation;
 		}
-		
+
 		public String getName() {
 			return name;
 		}
-		
+
 		public void setName(String name) {
 			this.name = name;
 		}
-		
+
 		public double getSaturation() {
 			return saturation;
 		}
-		
+
 		public void setSaturation(double saturation) {
 			this.saturation = saturation;
 		}
-		
+
 		@Override
 		public String toString() {
 			return name + '(' + saturation + ')';
 		}
 	}
-	
-	public static final class MyUser {
-		
+
+	private static final class MyUser {
+
 		private String name;
 		private List<Color> colors;
-		
+
 		public MyUser() {
 			name = "unknown";
 			colors = new ArrayList<Color>();
 		}
-		
+
 		public MyUser(String name, List<Color> colors) {
 			this.name = name;
 			this.colors = colors;
 		}
-		
+
 		public String getName() {
 			return name;
 		}
-		
+
 		public List<Color> getColors() {
 			return colors;
 		}
-		
+
 		public void setName(String name) {
 			this.name = name;
 		}
-		
+
 		public void setColors(List<Color> colors) {
 			this.colors = colors;
 		}
-		
+
 		@Override
 		public String toString() {
 			return name + " : " + colors;
 		}
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	// --------------------------------------------------------------------------------------------
-	
-	public static final class NameExtractor extends RichMapFunction<MyUser, Tuple2<String, MyUser>> {
+
+	private static final class NameExtractor extends RichMapFunction<MyUser, Tuple2<String, MyUser>> {
 		private static final long serialVersionUID = 1L;
 
 		@Override
@@ -137,8 +130,8 @@ public class AvroExternalJarProgram  {
 			return new Tuple2<String, MyUser>(namePrefix, u);
 		}
 	}
-	
-	public static final class NameGrouper extends RichReduceFunction<Tuple2<String, MyUser>> {
+
+	private static final class NameGrouper extends RichReduceFunction<Tuple2<String, MyUser>> {
 		private static final long serialVersionUID = 1L;
 
 		@Override
@@ -150,52 +143,51 @@ public class AvroExternalJarProgram  {
 	// --------------------------------------------------------------------------------------------
 	//  Test Data
 	// --------------------------------------------------------------------------------------------
-	
-	public static final class Generator {
-		
+
+	private static final class Generator {
+
 		private final Random rnd = new Random(2389756789345689276L);
-		
+
 		public MyUser nextUser() {
 			return randomUser();
 		}
-		
+
 		private MyUser randomUser() {
-			
+
 			int numColors = rnd.nextInt(5);
 			ArrayList<Color> colors = new ArrayList<Color>(numColors);
 			for (int i = 0; i < numColors; i++) {
 				colors.add(new Color(randomString(), rnd.nextDouble()));
 			}
-			
+
 			return new MyUser(randomString(), colors);
 		}
-		
+
 		private String randomString() {
 			char[] c = new char[this.rnd.nextInt(20) + 5];
-			
+
 			for (int i = 0; i < c.length; i++) {
 				c[i] = (char) (this.rnd.nextInt(150) + 40);
 			}
-			
+
 			return new String(c);
 		}
 	}
-	
+
 	public static void writeTestData(File testFile, int numRecords) throws IOException {
-		
+
 		DatumWriter<MyUser> userDatumWriter = new ReflectDatumWriter<MyUser>(MyUser.class);
 		DataFileWriter<MyUser> dataFileWriter = new DataFileWriter<MyUser>(userDatumWriter);
-		
+
 		dataFileWriter.create(ReflectData.get().getSchema(MyUser.class), testFile);
-		
-		
+
 		Generator generator = new Generator();
-		
+
 		for (int i = 0; i < numRecords; i++) {
 			MyUser user = generator.nextUser();
 			dataFileWriter.append(user);
 		}
-		
+
 		dataFileWriter.close();
 	}
 
@@ -203,17 +195,17 @@ public class AvroExternalJarProgram  {
 //		String testDataFile = new File("src/test/resources/testdata.avro").getAbsolutePath();
 //		writeTestData(new File(testDataFile), 50);
 //	}
-	
+
 	public static void main(String[] args) throws Exception {
 		String inputPath = args[0];
-		
+
 		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-		
+
 		DataSet<MyUser> input = env.createInput(new AvroInputFormat<MyUser>(new Path(inputPath), MyUser.class));
-	
+
 		DataSet<Tuple2<String, MyUser>> result = input.map(new NameExtractor()).groupBy(0).reduce(new NameGrouper());
-		
-		result.output(new DiscardingOutputFormat<Tuple2<String,MyUser>>());
+
+		result.output(new DiscardingOutputFormat<Tuple2<String, MyUser>>());
 		env.execute();
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroPojoTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroPojoTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroPojoTest.java
index f33f433..be968c5 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroPojoTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroPojoTest.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.api.io.avro;
 
 import org.apache.flink.api.common.ExecutionConfig;
@@ -29,6 +30,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.core.fs.Path;
 import org.apache.flink.test.util.MultipleProgramsTestBase;
 import org.apache.flink.util.Collector;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -43,6 +45,9 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 
+/**
+ * Tests for the {@link AvroInputFormat} reading Pojos.
+ */
 @RunWith(Parameterized.class)
 public class AvroPojoTest extends MultipleProgramsTestBase {
 	public AvroPojoTest(TestExecutionMode mode) {
@@ -88,7 +93,6 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 
 		env.execute("Simple Avro read job");
 
-
 		expected = "{\"name\": \"Alyssa\", \"favorite_number\": 256, \"favorite_color\": null, \"type_long_test\": null, \"type_double_test\": 123.45, \"type_null_test\": null, \"type_bool_test\": true, \"type_array_string\": [\"ELEMENT 1\", \"ELEMENT 2\"], \"type_array_boolean\": [true, false], \"type_nullable_array\": null, \"type_enum\": \"GREEN\", \"type_map\": null, \"type_fixed\": null, \"type_union\": null, \"type_nested\": {\"num\": 239, \"street\": \"Baker Street\", \"city\": \"London\", \"state\": \"London\", \"zip\": \"NW1 6XE\"}}\n" +
 					"{\"name\": \"Charlie\", \"favorite_number\": null, \"favorite_color\": \"blue\", \"type_long_test\": 1337, \"type_double_test\": 1.337, \"type_null_test\": null, \"type_bool_test\": false, \"type_array_string\": [], \"type_array_boolean\": [], \"type_nullable_array\": null, \"type_enum\": \"RED\", \"type_map\": null, \"type_fixed\": null, \"type_union\": null, \"type_nested\": {\"num\": 239, \"street\": \"Baker Street\", \"city\": \"London\", \"state\": \"London\", \"zip\": \"NW1 6XE\"}}\n";
 	}
@@ -116,7 +120,6 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 
 		env.execute("Simple Avro read job");
 
-
 		expected = "{\"name\": \"Alyssa\", \"favorite_number\": 256, \"favorite_color\": null, \"type_long_test\": null, \"type_double_test\": 123.45, \"type_null_test\": null, \"type_bool_test\": true, \"type_array_string\": [\"ELEMENT 1\", \"ELEMENT 2\"], \"type_array_boolean\": [true, false], \"type_nullable_array\": null, \"type_enum\": \"GREEN\", \"type_map\": {\"hehe\": 12}, \"type_fixed\": null, \"type_union\": null, \"type_nested\": {\"num\": 239, \"street\": \"Baker Street\", \"city\": \"London\", \"state\": \"London\", \"zip\": \"NW1 6XE\"}}\n" +
 					"{\"name\": \"Charlie\", \"favorite_number\": null, \"favorite_color\": \"blue\", \"type_long_test\": 1337, \"type_double_test\": 1.337, \"type_null_test\": null, \"type_bool_test\": false, \"type_array_string\": [], \"type_array_boolean\": [], \"type_nullable_array\": null, \"type_enum\": \"RED\", \"type_map\": {\"hehe\": 12}, \"type_fixed\": null, \"type_union\": null, \"type_nested\": {\"num\": 239, \"street\": \"Baker Street\", \"city\": \"London\", \"state\": \"London\", \"zip\": \"NW1 6XE\"}}\n";
 
@@ -142,7 +145,6 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		res.writeAsText(resultPath);
 		env.execute("Avro Key selection");
 
-
 		expected = "(Alyssa,1)\n(Charlie,1)\n";
 	}
 
@@ -163,7 +165,7 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		}).reduceGroup(new GroupReduceFunction<User, Tuple2<String, Integer>>() {
 			@Override
 			public void reduce(Iterable<User> values, Collector<Tuple2<String, Integer>> out) throws Exception {
-				for(User u : values) {
+				for (User u : values) {
 					out.collect(new Tuple2<String, Integer>(u.getName().toString(), 1));
 				}
 			}
@@ -172,7 +174,6 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		res.writeAsText(resultPath);
 		env.execute("Avro Key selection");
 
-
 		expected = "(Charlie,1)\n(Alyssa,1)\n";
 	}
 
@@ -202,16 +203,15 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		res.writeAsText(resultPath);
 		env.execute("Avro Key selection");
 
-
 		expected = "(Charlie,1)\n(Alyssa,1)\n";
 	}
 
 	/**
-	 * Test some know fields for grouping on
+	 * Test some know fields for grouping on.
 	 */
 	@Test
 	public void testAllFields() throws Exception {
-		for(String fieldName : Arrays.asList("name", "type_enum", "type_double_test")) {
+		for (String fieldName : Arrays.asList("name", "type_enum", "type_double_test")) {
 			testField(fieldName);
 		}
 	}
@@ -228,7 +228,7 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		DataSet<Object> res = usersDS.groupBy(fieldName).reduceGroup(new GroupReduceFunction<User, Object>() {
 			@Override
 			public void reduce(Iterable<User> values, Collector<Object> out) throws Exception {
-				for(User u : values) {
+				for (User u : values) {
 					out.collect(u.get(fieldName));
 				}
 			}
@@ -240,11 +240,11 @@ public class AvroPojoTest extends MultipleProgramsTestBase {
 		ExecutionConfig ec = env.getConfig();
 		Assert.assertTrue(ec.getRegisteredKryoTypes().contains(org.apache.flink.api.io.avro.generated.Fixed16.class));
 
-		if(fieldName.equals("name")) {
+		if (fieldName.equals("name")) {
 			expected = "Alyssa\nCharlie";
-		} else if(fieldName.equals("type_enum")) {
+		} else if (fieldName.equals("type_enum")) {
 			expected = "GREEN\nRED\n";
-		} else if(fieldName.equals("type_double_test")) {
+		} else if (fieldName.equals("type_double_test")) {
 			expected = "123.45\n1.337\n";
 		} else {
 			Assert.fail("Unknown field");

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroRecordInputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroRecordInputFormatTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroRecordInputFormatTest.java
index 3b6ad63..7bff28a 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroRecordInputFormatTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroRecordInputFormatTest.java
@@ -18,18 +18,6 @@
 
 package org.apache.flink.api.io.avro;
 
-import org.apache.avro.Schema;
-import org.apache.avro.file.DataFileReader;
-import org.apache.avro.file.DataFileWriter;
-import org.apache.avro.file.FileReader;
-import org.apache.avro.generic.GenericData;
-import org.apache.avro.generic.GenericDatumReader;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.io.DatumReader;
-import org.apache.avro.io.DatumWriter;
-import org.apache.avro.specific.SpecificDatumReader;
-import org.apache.avro.specific.SpecificDatumWriter;
-import org.apache.avro.util.Utf8;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
@@ -46,6 +34,19 @@ import org.apache.flink.core.fs.FileInputSplit;
 import org.apache.flink.core.fs.Path;
 import org.apache.flink.core.memory.DataInputViewStreamWrapper;
 import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+
+import org.apache.avro.Schema;
+import org.apache.avro.file.DataFileReader;
+import org.apache.avro.file.DataFileWriter;
+import org.apache.avro.file.FileReader;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.specific.SpecificDatumReader;
+import org.apache.avro.specific.SpecificDatumWriter;
+import org.apache.avro.util.Utf8;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -73,33 +74,31 @@ import static org.junit.Assert.assertTrue;
  * http://avro.apache.org/docs/current/gettingstartedjava.html
  */
 public class AvroRecordInputFormatTest {
-	
+
 	public File testFile;
-	
-	final static String TEST_NAME = "Alyssa";
-	
-	final static String TEST_ARRAY_STRING_1 = "ELEMENT 1";
-	final static String TEST_ARRAY_STRING_2 = "ELEMENT 2";
-	
-	final static boolean TEST_ARRAY_BOOLEAN_1 = true;
-	final static boolean TEST_ARRAY_BOOLEAN_2 = false;
-	
-	final static Colors TEST_ENUM_COLOR = Colors.GREEN;
-	
-	final static String TEST_MAP_KEY1 = "KEY 1";
-	final static long TEST_MAP_VALUE1 = 8546456L;
-	final static String TEST_MAP_KEY2 = "KEY 2";
-	final static long TEST_MAP_VALUE2 = 17554L;
-	
-	final static int TEST_NUM = 239;
-	final static String TEST_STREET = "Baker Street";
-	final static String TEST_CITY = "London";
-	final static String TEST_STATE = "London";
-	final static String TEST_ZIP = "NW1 6XE";
-	
 
-	private Schema userSchema = new User().getSchema();
+	static final String TEST_NAME = "Alyssa";
+
+	static final String TEST_ARRAY_STRING_1 = "ELEMENT 1";
+	static final String TEST_ARRAY_STRING_2 = "ELEMENT 2";
+
+	static final boolean TEST_ARRAY_BOOLEAN_1 = true;
+	static final boolean TEST_ARRAY_BOOLEAN_2 = false;
+
+	static final Colors TEST_ENUM_COLOR = Colors.GREEN;
 
+	static final String TEST_MAP_KEY1 = "KEY 1";
+	static final long TEST_MAP_VALUE1 = 8546456L;
+	static final String TEST_MAP_KEY2 = "KEY 2";
+	static final long TEST_MAP_VALUE2 = 17554L;
+
+	static final int TEST_NUM = 239;
+	static final String TEST_STREET = "Baker Street";
+	static final String TEST_CITY = "London";
+	static final String TEST_STATE = "London";
+	static final String TEST_ZIP = "NW1 6XE";
+
+	private Schema userSchema = new User().getSchema();
 
 	public static void writeTestFile(File testFile) throws IOException {
 		ArrayList<CharSequence> stringArray = new ArrayList<CharSequence>();
@@ -113,7 +112,7 @@ public class AvroRecordInputFormatTest {
 		HashMap<CharSequence, Long> longMap = new HashMap<CharSequence, Long>();
 		longMap.put(TEST_MAP_KEY1, TEST_MAP_VALUE1);
 		longMap.put(TEST_MAP_KEY2, TEST_MAP_VALUE2);
-		
+
 		Address addr = new Address();
 		addr.setNum(TEST_NUM);
 		addr.setStreet(TEST_STREET);
@@ -121,7 +120,6 @@ public class AvroRecordInputFormatTest {
 		addr.setState(TEST_STATE);
 		addr.setZip(TEST_ZIP);
 
-
 		User user1 = new User();
 
 		user1.setName(TEST_NAME);
@@ -162,13 +160,13 @@ public class AvroRecordInputFormatTest {
 		dataFileWriter.append(user2);
 		dataFileWriter.close();
 	}
+
 	@Before
 	public void createFiles() throws IOException {
 		testFile = File.createTempFile("AvroInputFormatTest", null);
 		writeTestFile(testFile);
 	}
 
-
 	/**
 	 * Test if the AvroInputFormat is able to properly read data from an avro file.
 	 * @throws IOException
@@ -176,45 +174,45 @@ public class AvroRecordInputFormatTest {
 	@Test
 	public void testDeserialisation() throws IOException {
 		Configuration parameters = new Configuration();
-		
+
 		AvroInputFormat<User> format = new AvroInputFormat<User>(new Path(testFile.getAbsolutePath()), User.class);
-		
+
 		format.configure(parameters);
 		FileInputSplit[] splits = format.createInputSplits(1);
 		assertEquals(splits.length, 1);
 		format.open(splits[0]);
-		
+
 		User u = format.nextRecord(null);
 		assertNotNull(u);
-		
+
 		String name = u.getName().toString();
 		assertNotNull("empty record", name);
 		assertEquals("name not equal", TEST_NAME, name);
-		
+
 		// check arrays
 		List<CharSequence> sl = u.getTypeArrayString();
 		assertEquals("element 0 not equal", TEST_ARRAY_STRING_1, sl.get(0).toString());
 		assertEquals("element 1 not equal", TEST_ARRAY_STRING_2, sl.get(1).toString());
-		
+
 		List<Boolean> bl = u.getTypeArrayBoolean();
 		assertEquals("element 0 not equal", TEST_ARRAY_BOOLEAN_1, bl.get(0));
 		assertEquals("element 1 not equal", TEST_ARRAY_BOOLEAN_2, bl.get(1));
-		
+
 		// check enums
 		Colors enumValue = u.getTypeEnum();
 		assertEquals("enum not equal", TEST_ENUM_COLOR, enumValue);
-		
+
 		// check maps
 		Map<CharSequence, Long> lm = u.getTypeMap();
 		assertEquals("map value of key 1 not equal", TEST_MAP_VALUE1, lm.get(new Utf8(TEST_MAP_KEY1)).longValue());
 		assertEquals("map value of key 2 not equal", TEST_MAP_VALUE2, lm.get(new Utf8(TEST_MAP_KEY2)).longValue());
-		
+
 		assertFalse("expecting second element", format.reachedEnd());
 		assertNotNull("expecting second element", format.nextRecord(u));
-		
+
 		assertNull(format.nextRecord(u));
 		assertTrue(format.reachedEnd());
-		
+
 		format.close();
 	}
 
@@ -225,46 +223,46 @@ public class AvroRecordInputFormatTest {
 	@Test
 	public void testDeserialisationReuseAvroRecordFalse() throws IOException {
 		Configuration parameters = new Configuration();
-		
+
 		AvroInputFormat<User> format = new AvroInputFormat<User>(new Path(testFile.getAbsolutePath()), User.class);
 		format.setReuseAvroValue(false);
-		
+
 		format.configure(parameters);
 		FileInputSplit[] splits = format.createInputSplits(1);
 		assertEquals(splits.length, 1);
 		format.open(splits[0]);
-		
+
 		User u = format.nextRecord(null);
 		assertNotNull(u);
-		
+
 		String name = u.getName().toString();
 		assertNotNull("empty record", name);
 		assertEquals("name not equal", TEST_NAME, name);
-		
+
 		// check arrays
 		List<CharSequence> sl = u.getTypeArrayString();
 		assertEquals("element 0 not equal", TEST_ARRAY_STRING_1, sl.get(0).toString());
 		assertEquals("element 1 not equal", TEST_ARRAY_STRING_2, sl.get(1).toString());
-		
+
 		List<Boolean> bl = u.getTypeArrayBoolean();
 		assertEquals("element 0 not equal", TEST_ARRAY_BOOLEAN_1, bl.get(0));
 		assertEquals("element 1 not equal", TEST_ARRAY_BOOLEAN_2, bl.get(1));
-		
+
 		// check enums
 		Colors enumValue = u.getTypeEnum();
 		assertEquals("enum not equal", TEST_ENUM_COLOR, enumValue);
-		
+
 		// check maps
 		Map<CharSequence, Long> lm = u.getTypeMap();
 		assertEquals("map value of key 1 not equal", TEST_MAP_VALUE1, lm.get(new Utf8(TEST_MAP_KEY1)).longValue());
 		assertEquals("map value of key 2 not equal", TEST_MAP_VALUE2, lm.get(new Utf8(TEST_MAP_KEY2)).longValue());
-		
+
 		assertFalse("expecting second element", format.reachedEnd());
 		assertNotNull("expecting second element", format.nextRecord(u));
-		
+
 		assertNull(format.nextRecord(u));
 		assertTrue(format.reachedEnd());
-		
+
 		format.close();
 	}
 
@@ -274,7 +272,7 @@ public class AvroRecordInputFormatTest {
 	 * However, if generated classes are not available, one can also use GenericData.Record.
 	 * It is an untyped key-value record which is using a schema to validate the correctness of the data.
 	 *
-	 * It is not recommended to use GenericData.Record with Flink. Use generated POJOs instead.
+	 * <p>It is not recommended to use GenericData.Record with Flink. Use generated POJOs instead.
 	 */
 	@Test
 	public void testDeserializeToGenericType() throws IOException {
@@ -284,7 +282,7 @@ public class AvroRecordInputFormatTest {
 			// initialize Record by reading it from disk (that's easier than creating it by hand)
 			GenericData.Record rec = new GenericData.Record(userSchema);
 			dataFileReader.next(rec);
-			
+
 			// check if record has been read correctly
 			assertNotNull(rec);
 			assertEquals("name not equal", TEST_NAME, rec.get("name").toString());
@@ -296,7 +294,7 @@ public class AvroRecordInputFormatTest {
 
 			ExecutionConfig ec = new ExecutionConfig();
 			Assert.assertEquals(GenericTypeInfo.class, te.getClass());
-			
+
 			Serializers.recursivelyRegisterType(te.getTypeClass(), ec, new HashSet<Class<?>>());
 
 			TypeSerializer<GenericData.Record> tser = te.createSerializer(ec);
@@ -312,8 +310,7 @@ public class AvroRecordInputFormatTest {
 
 			GenericData.Record newRec;
 			try (DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(
-					new ByteArrayInputStream(out.toByteArray())))
-			{
+					new ByteArrayInputStream(out.toByteArray()))) {
 				newRec = tser.deserialize(inView);
 			}
 
@@ -324,7 +321,7 @@ public class AvroRecordInputFormatTest {
 			assertEquals(null, newRec.get("type_long_test"));
 		}
 	}
-		
+
 	/**
 	 * This test validates proper serialization with specific (generated POJO) types.
 	 */
@@ -355,8 +352,7 @@ public class AvroRecordInputFormatTest {
 
 			User newRec;
 			try (DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(
-					new ByteArrayInputStream(out.toByteArray())))
-			{
+					new ByteArrayInputStream(out.toByteArray()))) {
 				newRec = tser.deserialize(inView);
 			}
 
@@ -370,9 +366,8 @@ public class AvroRecordInputFormatTest {
 	/**
 	 * Test if the AvroInputFormat is able to properly read data from an Avro
 	 * file as a GenericRecord.
-	 * 
-	 * @throws IOException,
-	 *             if there is an exception
+	 *
+	 * @throws IOException
 	 */
 	@Test
 	public void testDeserialisationGenericRecord() throws IOException {
@@ -385,8 +380,8 @@ public class AvroRecordInputFormatTest {
 	}
 
 	/**
-	 * Helper method to test GenericRecord serialisation
-	 * 
+	 * Helper method to test GenericRecord serialisation.
+	 *
 	 * @param format
 	 *            the format to test
 	 * @param parameters
@@ -441,10 +436,9 @@ public class AvroRecordInputFormatTest {
 
 	/**
 	 * Test if the AvroInputFormat is able to properly read data from an avro
-	 * file as a GenericRecord
-	 * 
-	 * @throws IOException,
-	 *             if there is an error
+	 * file as a GenericRecord.
+	 *
+	 * @throws IOException if there is an error
 	 */
 	@Test
 	public void testDeserialisationGenericRecordReuseAvroValueFalse() throws IOException {

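For completeness, the generic-record path exercised by the two tests above: asking the same AvroInputFormat for GenericRecord instead of a generated class. This is a sketch under the caveat from the Javadoc above (generated POJOs are preferred with Flink); the path is illustrative, error handling is elided, and the imports match those in the test:

	AvroInputFormat<GenericRecord> format =
			new AvroInputFormat<GenericRecord>(new Path("file:///tmp/users.avro"), GenericRecord.class);
	format.configure(new Configuration());

	FileInputSplit[] splits = format.createInputSplits(1);
	format.open(splits[0]);

	GenericRecord rec = format.nextRecord(null);
	Object favoriteNumber = rec.get("favorite_number");   // untyped, schema-validated access
	format.close();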
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroSplittableInputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroSplittableInputFormatTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroSplittableInputFormatTest.java
index 37a83d1..6401a87 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroSplittableInputFormatTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/AvroSplittableInputFormatTest.java
@@ -18,9 +18,6 @@
 
 package org.apache.flink.api.io.avro;
 
-import org.apache.avro.file.DataFileWriter;
-import org.apache.avro.io.DatumWriter;
-import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.flink.api.io.avro.generated.Address;
 import org.apache.flink.api.io.avro.generated.Colors;
 import org.apache.flink.api.io.avro.generated.Fixed16;
@@ -30,6 +27,10 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.fs.FileInputSplit;
 import org.apache.flink.core.fs.Path;
+
+import org.apache.avro.file.DataFileWriter;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.specific.SpecificDatumWriter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -49,56 +50,55 @@ import static org.junit.Assert.assertEquals;
  * http://avro.apache.org/docs/current/gettingstartedjava.html
  */
 public class AvroSplittableInputFormatTest {
-	
+
 	private File testFile;
-	
-	final static String TEST_NAME = "Alyssa";
-	
-	final static String TEST_ARRAY_STRING_1 = "ELEMENT 1";
-	final static String TEST_ARRAY_STRING_2 = "ELEMENT 2";
-	
-	final static boolean TEST_ARRAY_BOOLEAN_1 = true;
-	final static boolean TEST_ARRAY_BOOLEAN_2 = false;
-	
-	final static Colors TEST_ENUM_COLOR = Colors.GREEN;
-	
-	final static String TEST_MAP_KEY1 = "KEY 1";
-	final static long TEST_MAP_VALUE1 = 8546456L;
-	final static String TEST_MAP_KEY2 = "KEY 2";
-	final static long TEST_MAP_VALUE2 = 17554L;
-
-	final static Integer TEST_NUM = new Integer(239);
-	final static String TEST_STREET = "Baker Street";
-	final static String TEST_CITY = "London";
-	final static String TEST_STATE = "London";
-	final static String TEST_ZIP = "NW1 6XE";
-	
-	final static int NUM_RECORDS = 5000;
+
+	static final String TEST_NAME = "Alyssa";
+
+	static final String TEST_ARRAY_STRING_1 = "ELEMENT 1";
+	static final String TEST_ARRAY_STRING_2 = "ELEMENT 2";
+
+	static final boolean TEST_ARRAY_BOOLEAN_1 = true;
+	static final boolean TEST_ARRAY_BOOLEAN_2 = false;
+
+	static final Colors TEST_ENUM_COLOR = Colors.GREEN;
+
+	static final String TEST_MAP_KEY1 = "KEY 1";
+	static final long TEST_MAP_VALUE1 = 8546456L;
+	static final String TEST_MAP_KEY2 = "KEY 2";
+	static final long TEST_MAP_VALUE2 = 17554L;
+
+	static final Integer TEST_NUM = new Integer(239);
+	static final String TEST_STREET = "Baker Street";
+	static final String TEST_CITY = "London";
+	static final String TEST_STATE = "London";
+	static final String TEST_ZIP = "NW1 6XE";
+
+	static final int NUM_RECORDS = 5000;
 
 	@Before
 	public void createFiles() throws IOException {
 		testFile = File.createTempFile("AvroSplittableInputFormatTest", null);
-		
+
 		ArrayList<CharSequence> stringArray = new ArrayList<CharSequence>();
 		stringArray.add(TEST_ARRAY_STRING_1);
 		stringArray.add(TEST_ARRAY_STRING_2);
-		
+
 		ArrayList<Boolean> booleanArray = new ArrayList<Boolean>();
 		booleanArray.add(TEST_ARRAY_BOOLEAN_1);
 		booleanArray.add(TEST_ARRAY_BOOLEAN_2);
-		
+
 		HashMap<CharSequence, Long> longMap = new HashMap<CharSequence, Long>();
 		longMap.put(TEST_MAP_KEY1, TEST_MAP_VALUE1);
 		longMap.put(TEST_MAP_KEY2, TEST_MAP_VALUE2);
-		
+
 		Address addr = new Address();
 		addr.setNum(new Integer(TEST_NUM));
 		addr.setStreet(TEST_STREET);
 		addr.setCity(TEST_CITY);
 		addr.setState(TEST_STATE);
 		addr.setZip(TEST_ZIP);
-		
-		
+
 		User user1 = new User();
 		user1.setName(TEST_NAME);
 		user1.setFavoriteNumber(256);
@@ -109,29 +109,28 @@ public class AvroSplittableInputFormatTest {
 		user1.setTypeEnum(TEST_ENUM_COLOR);
 		user1.setTypeMap(longMap);
 		user1.setTypeNested(addr);
-		
+
 		// Construct via builder
 		User user2 = User.newBuilder()
-		             .setName(TEST_NAME)
-		             .setFavoriteColor("blue")
-		             .setFavoriteNumber(null)
-		             .setTypeBoolTest(false)
-		             .setTypeDoubleTest(1.337d)
-		             .setTypeNullTest(null)
-		             .setTypeLongTest(1337L)
-		             .setTypeArrayString(new ArrayList<CharSequence>())
-		             .setTypeArrayBoolean(new ArrayList<Boolean>())
-		             .setTypeNullableArray(null)
-		             .setTypeEnum(Colors.RED)
-		             .setTypeMap(new HashMap<CharSequence, Long>())
-					 .setTypeFixed(new Fixed16())
-					 .setTypeUnion(123L)
+				.setName(TEST_NAME)
+				.setFavoriteColor("blue")
+				.setFavoriteNumber(null)
+				.setTypeBoolTest(false)
+				.setTypeDoubleTest(1.337d)
+				.setTypeNullTest(null)
+				.setTypeLongTest(1337L)
+				.setTypeArrayString(new ArrayList<CharSequence>())
+				.setTypeArrayBoolean(new ArrayList<Boolean>())
+				.setTypeNullableArray(null)
+				.setTypeEnum(Colors.RED)
+				.setTypeMap(new HashMap<CharSequence, Long>())
+				.setTypeFixed(new Fixed16())
+				.setTypeUnion(123L)
 				.setTypeNested(
 						Address.newBuilder().setNum(TEST_NUM).setStreet(TEST_STREET)
 								.setCity(TEST_CITY).setState(TEST_STATE).setZip(TEST_ZIP)
 								.build())
-
-		             .build();
+				.build();
 		DatumWriter<User> userDatumWriter = new SpecificDatumWriter<User>(User.class);
 		DataFileWriter<User> dataFileWriter = new DataFileWriter<User>(userDatumWriter);
 		dataFileWriter.create(user1.getSchema(), testFile);
@@ -139,7 +138,7 @@ public class AvroSplittableInputFormatTest {
 		dataFileWriter.append(user2);
 
 		Random rnd = new Random(1337);
-		for(int i = 0; i < NUM_RECORDS -2 ; i++) {
+		for (int i = 0; i < NUM_RECORDS - 2; i++) {
 			User user = new User();
 			user.setName(TEST_NAME + rnd.nextInt());
 			user.setFavoriteNumber(rnd.nextInt());
@@ -161,21 +160,21 @@ public class AvroSplittableInputFormatTest {
 		}
 		dataFileWriter.close();
 	}
-	
+
 	@Test
 	public void testSplittedIF() throws IOException {
 		Configuration parameters = new Configuration();
-		
+
 		AvroInputFormat<User> format = new AvroInputFormat<User>(new Path(testFile.getAbsolutePath()), User.class);
 
 		format.configure(parameters);
 		FileInputSplit[] splits = format.createInputSplits(4);
 		assertEquals(splits.length, 4);
 		int elements = 0;
-		int elementsPerSplit[] = new int[4];
-		for(int i = 0; i < splits.length; i++) {
+		int[] elementsPerSplit = new int[4];
+		for (int i = 0; i < splits.length; i++) {
 			format.open(splits[i]);
-			while(!format.reachedEnd()) {
+			while (!format.reachedEnd()) {
 				User u = format.nextRecord(null);
 				Assert.assertTrue(u.getName().toString().startsWith(TEST_NAME));
 				elements++;
@@ -205,15 +204,15 @@ public class AvroSplittableInputFormatTest {
 		assertEquals(splits.length, 4);
 
 		int elements = 0;
-		int elementsPerSplit[] = new int[4];
-		for(int i = 0; i < splits.length; i++) {
+		int[] elementsPerSplit = new int[4];
+		for (int i = 0; i < splits.length; i++) {
 			format.reopen(splits[i], format.getCurrentState());
-			while(!format.reachedEnd()) {
+			while (!format.reachedEnd()) {
 				User u = format.nextRecord(null);
 				Assert.assertTrue(u.getName().toString().startsWith(TEST_NAME));
 				elements++;
 
-				if(format.getRecordsReadFromBlock() == recordsUntilCheckpoint) {
+				if (format.getRecordsReadFromBlock() == recordsUntilCheckpoint) {
 
 					// do the whole checkpoint-restore procedure and see if we pick up from where we left off.
 					Tuple2<Long, Long> state = format.getCurrentState();
@@ -251,15 +250,15 @@ public class AvroSplittableInputFormatTest {
 		assertEquals(splits.length, 4);
 
 		int elements = 0;
-		int elementsPerSplit[] = new int[4];
-		for(int i = 0; i < splits.length; i++) {
+		int[] elementsPerSplit = new int[4];
+		for (int i = 0; i < splits.length; i++) {
 			format.open(splits[i]);
-			while(!format.reachedEnd()) {
+			while (!format.reachedEnd()) {
 				User u = format.nextRecord(null);
 				Assert.assertTrue(u.getName().toString().startsWith(TEST_NAME));
 				elements++;
 
-				if(format.getRecordsReadFromBlock() == recordsUntilCheckpoint) {
+				if (format.getRecordsReadFromBlock() == recordsUntilCheckpoint) {
 
 					// do the whole checkpoint-restore procedure and see if we pick up from where we left off.
 					Tuple2<Long, Long> state = format.getCurrentState();
@@ -305,12 +304,12 @@ public class AvroSplittableInputFormatTest {
 		int elementsPerSplit[] = new int[4];
 		int cnt = 0;
 		int i = 0;
-		for(InputSplit s:sp) {
+		for (InputSplit s:sp) {
 			RecordReader<AvroWrapper<User>, NullWritable> r = format.getRecordReader(s, jf, new HadoopDummyReporter());
 			AvroWrapper<User> k = r.createKey();
 			NullWritable v = r.createValue();
 
-			while(r.next(k,v)) {
+			while (r.next(k, v)) {
 				cnt++;
 				elementsPerSplit[i]++;
 			}
@@ -318,7 +317,7 @@ public class AvroSplittableInputFormatTest {
 		}
 		System.out.println("Status "+Arrays.toString(elementsPerSplit));
 	} **/
-	
+
 	@After
 	public void deleteFiles() {
 		testFile.delete();

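The tests above drive AvroInputFormat's split reading and checkpoint/restore through getCurrentState() and reopen(). A minimal standalone sketch of that cycle, using only calls that appear in the test; the file path is illustrative.

import org.apache.flink.api.io.avro.generated.User;
import org.apache.flink.api.java.io.AvroInputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;

public class AvroSplitReadSketch {

	public static void main(String[] args) throws Exception {
		AvroInputFormat<User> format =
				new AvroInputFormat<User>(new Path("/tmp/users.avro"), User.class);
		format.configure(new Configuration());

		// let the format split the file; each split can be read independently
		FileInputSplit[] splits = format.createInputSplits(4);

		for (FileInputSplit split : splits) {
			format.open(split);

			// read a few records, then capture the current reading position
			for (int i = 0; i < 10 && !format.reachedEnd(); i++) {
				format.nextRecord(null);
			}
			Tuple2<Long, Long> state = format.getCurrentState();
			format.close();

			// resume from the captured position and drain the rest of the split
			format.reopen(split, state);
			while (!format.reachedEnd()) {
				format.nextRecord(null);
			}
			format.close();
		}
	}
}
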
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/example/AvroTypeExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/example/AvroTypeExample.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/example/AvroTypeExample.java
index 5a21691..96ffb7f 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/example/AvroTypeExample.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/io/avro/example/AvroTypeExample.java
@@ -18,9 +18,6 @@
 
 package org.apache.flink.api.io.avro.example;
 
-import java.io.IOException;
-import java.util.Random;
-
 import org.apache.flink.api.common.functions.GroupReduceFunction;
 import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.common.io.GenericInputFormat;
@@ -29,10 +26,15 @@ import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.util.Collector;
 
+import java.io.IOException;
+import java.util.Random;
+
+/**
+ * Example that shows how to use an Avro type in a program.
+ */
 @SuppressWarnings("serial")
 public class AvroTypeExample {
-	
-	
+
 	public static void main(String[] args) throws Exception {
 
 		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
@@ -45,49 +47,45 @@ public class AvroTypeExample {
 			.reduceGroup(new ConcatenatingReducer())
 			.print();
 	}
-	
-	
-	public static final class NumberExtractingMapper implements MapFunction<User, Tuple2<User, Integer>> {
-		
+
+	private static final class NumberExtractingMapper implements MapFunction<User, Tuple2<User, Integer>> {
+
 		@Override
 		public Tuple2<User, Integer> map(User user) {
 			return new Tuple2<User, Integer>(user, user.getFavoriteNumber());
 		}
 	}
-	
-	
-	public static final class ConcatenatingReducer implements GroupReduceFunction<Tuple2<User, Integer>, Tuple2<Integer, String>> {
+
+	private static final class ConcatenatingReducer implements GroupReduceFunction<Tuple2<User, Integer>, Tuple2<Integer, String>> {
 
 		@Override
 		public void reduce(Iterable<Tuple2<User, Integer>> values, Collector<Tuple2<Integer, String>> out) throws Exception {
 			int number = 0;
 			StringBuilder colors = new StringBuilder();
-			
+
 			for (Tuple2<User, Integer> u : values) {
 				number = u.f1;
 				colors.append(u.f0.getFavoriteColor()).append(" - ");
 			}
-			
+
 			colors.setLength(colors.length() - 3);
 			out.collect(new Tuple2<Integer, String>(number, colors.toString()));
 		}
 	}
-	
-	
-	public static final class UserGeneratingInputFormat extends GenericInputFormat<User> {
+
+	private static final class UserGeneratingInputFormat extends GenericInputFormat<User> {
 
 		private static final long serialVersionUID = 1L;
-		
+
 		private static final int NUM = 100;
-		
+
 		private final Random rnd = new Random(32498562304986L);
-		
+
 		private static final String[] NAMES = { "Peter", "Bob", "Liddy", "Alexander", "Stan" };
-		
+
 		private static final String[] COLORS = { "mauve", "crimson", "copper", "sky", "grass" };
-		
+
 		private int count;
-		
 
 		@Override
 		public boolean reachedEnd() throws IOException {
@@ -97,7 +95,7 @@ public class AvroTypeExample {
 		@Override
 		public User nextRecord(User reuse) throws IOException {
 			count++;
-			
+
 			User u = new User();
 			u.setName(NAMES[rnd.nextInt(NAMES.length)]);
 			u.setFavoriteColor(COLORS[rnd.nextInt(COLORS.length)]);

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroInputFormatTypeExtractionTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroInputFormatTypeExtractionTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroInputFormatTypeExtractionTest.java
index e245026..5ae88ca 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroInputFormatTypeExtractionTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroInputFormatTypeExtractionTest.java
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.api.java.io;
 
 import org.apache.flink.api.common.io.InputFormat;
@@ -26,9 +25,13 @@ import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.typeutils.PojoTypeInfo;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 import org.apache.flink.core.fs.Path;
+
 import org.junit.Assert;
 import org.junit.Test;
 
+/**
+ * Tests for the type extraction of the {@link AvroInputFormat}.
+ */
 public class AvroInputFormatTypeExtractionTest {
 
 	@Test
@@ -42,7 +45,6 @@ public class AvroInputFormatTypeExtractionTest {
 			DataSet<MyAvroType> input = env.createInput(format);
 			TypeInformation<?> typeInfoDataSet = input.getType();
 
-
 			Assert.assertTrue(typeInfoDirect instanceof PojoTypeInfo);
 			Assert.assertTrue(typeInfoDataSet instanceof PojoTypeInfo);
 
@@ -54,6 +56,9 @@ public class AvroInputFormatTypeExtractionTest {
 		}
 	}
 
+	/**
+	 * Test type.
+	 */
 	public static final class MyAvroType {
 
 		public String theString;

http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroOutputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroOutputFormatTest.java b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroOutputFormatTest.java
index f843d3b..87334a7 100644
--- a/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroOutputFormatTest.java
+++ b/flink-connectors/flink-avro/src/test/java/org/apache/flink/api/java/io/AvroOutputFormatTest.java
@@ -18,9 +18,14 @@
 
 package org.apache.flink.api.java.io;
 
-import static org.apache.flink.api.java.io.AvroOutputFormat.Codec;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.flink.api.io.avro.example.User;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.Path;
+
+import org.apache.avro.Schema;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -29,16 +34,12 @@ import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 
-import org.apache.avro.Schema;
-import org.apache.flink.api.io.avro.example.User;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.core.fs.FileSystem;
-import org.apache.flink.core.fs.Path;
-import org.junit.Test;
-import org.mockito.internal.util.reflection.Whitebox;
+import static org.apache.flink.api.java.io.AvroOutputFormat.Codec;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
- * Tests for {@link AvroOutputFormat}
+ * Tests for {@link AvroOutputFormat}.
  */
 public class AvroOutputFormatTest {
 
@@ -116,12 +117,12 @@ public class AvroOutputFormatTest {
 	@Test
 	public void testCompression() throws Exception {
 		// given
-		final Path outputPath = new Path(File.createTempFile("avro-output-file","avro").getAbsolutePath());
-		final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(outputPath,User.class);
+		final Path outputPath = new Path(File.createTempFile("avro-output-file", "avro").getAbsolutePath());
+		final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(outputPath, User.class);
 		outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
 
-		final Path compressedOutputPath = new Path(File.createTempFile("avro-output-file","compressed.avro").getAbsolutePath());
-		final AvroOutputFormat<User> compressedOutputFormat = new AvroOutputFormat<>(compressedOutputPath,User.class);
+		final Path compressedOutputPath = new Path(File.createTempFile("avro-output-file", "compressed.avro").getAbsolutePath());
+		final AvroOutputFormat<User> compressedOutputFormat = new AvroOutputFormat<>(compressedOutputPath, User.class);
 		compressedOutputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
 		compressedOutputFormat.setCodec(Codec.SNAPPY);
 
@@ -144,9 +145,9 @@ public class AvroOutputFormatTest {
 
 	private void output(final AvroOutputFormat<User> outputFormat) throws IOException {
 		outputFormat.configure(new Configuration());
-		outputFormat.open(1,1);
+		outputFormat.open(1, 1);
 		for (int i = 0; i < 100; i++) {
-			outputFormat.writeRecord(new User("testUser",1,"blue"));
+			outputFormat.writeRecord(new User("testUser", 1, "blue"));
 		}
 		outputFormat.close();
 	}

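For reference, testCompression() above compares an uncompressed and a Snappy-compressed output file. A minimal sketch of writing with AvroOutputFormat and a codec, mirroring the calls in the test; the output path is illustrative, and Snappy needs the corresponding library on the classpath at runtime.

import org.apache.flink.api.io.avro.example.User;
import org.apache.flink.api.java.io.AvroOutputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class AvroWriteSketch {

	public static void main(String[] args) throws Exception {
		AvroOutputFormat<User> outputFormat =
				new AvroOutputFormat<>(new Path("/tmp/users.compressed.avro"), User.class);
		outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);

		// block compression, as exercised by testCompression()
		outputFormat.setCodec(AvroOutputFormat.Codec.SNAPPY);

		outputFormat.configure(new Configuration());
		outputFormat.open(1, 1); // same manual open as in the test's output() helper

		for (int i = 0; i < 100; i++) {
			outputFormat.writeRecord(new User("testUser", i, "blue"));
		}
		outputFormat.close();
	}
}
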
http://git-wip-us.apache.org/repos/asf/flink/blob/b58545ec/flink-connectors/flink-avro/src/test/resources/avro/user.avsc
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-avro/src/test/resources/avro/user.avsc b/flink-connectors/flink-avro/src/test/resources/avro/user.avsc
index 02c11af..ab8adf5 100644
--- a/flink-connectors/flink-avro/src/test/resources/avro/user.avsc
+++ b/flink-connectors/flink-avro/src/test/resources/avro/user.avsc
@@ -21,8 +21,8 @@
      {"name": "type_double_test", "type": "double"},
      {"name": "type_null_test", "type": ["null"]},
      {"name": "type_bool_test", "type": ["boolean"]},
-     {"name": "type_array_string", "type" : {"type" : "array", "items" : "string"}},  
-     {"name": "type_array_boolean", "type" : {"type" : "array", "items" : "boolean"}}, 
+     {"name": "type_array_string", "type" : {"type" : "array", "items" : "string"}},
+     {"name": "type_array_boolean", "type" : {"type" : "array", "items" : "boolean"}},
      {"name": "type_nullable_array", "type": ["null", {"type":"array", "items":"string"}], "default":null},
      {"name": "type_enum", "type": {"type": "enum", "name": "Colors", "symbols" : ["RED", "GREEN", "BLUE"]}},
      {"name": "type_map", "type": {"type": "map", "values": "long"}},


[08/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kafka*

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
index ac278fb..03a23f5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
@@ -18,14 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import kafka.consumer.Consumer;
-import kafka.consumer.ConsumerConfig;
-import kafka.consumer.ConsumerIterator;
-import kafka.consumer.KafkaStream;
-import kafka.javaapi.consumer.ConsumerConnector;
-import kafka.message.MessageAndMetadata;
-import kafka.server.KafkaServer;
-import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.JobExecutionResult;
 import org.apache.flink.api.common.functions.FlatMapFunction;
@@ -82,6 +74,15 @@ import org.apache.flink.test.util.SuccessException;
 import org.apache.flink.testutils.junit.RetryOnException;
 import org.apache.flink.testutils.junit.RetryRule;
 import org.apache.flink.util.Collector;
+
+import kafka.consumer.Consumer;
+import kafka.consumer.ConsumerConfig;
+import kafka.consumer.ConsumerIterator;
+import kafka.consumer.KafkaStream;
+import kafka.javaapi.consumer.ConsumerConnector;
+import kafka.message.MessageAndMetadata;
+import kafka.server.KafkaServer;
+import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.common.errors.TimeoutException;
 import org.junit.Assert;
@@ -90,6 +91,7 @@ import org.junit.Rule;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -114,14 +116,15 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-
+/**
+ * Abstract test base for all Kafka consumer tests.
+ */
 @SuppressWarnings("serial")
 public abstract class KafkaConsumerTestBase extends KafkaTestBase {
-	
+
 	@Rule
 	public RetryRule retryRule = new RetryRule();
 
-
 	// ------------------------------------------------------------------------
 	//  Common Test Preparation
 	// ------------------------------------------------------------------------
@@ -134,8 +137,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	public void ensureNoJobIsLingering() throws Exception {
 		JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
 	}
-	
-	
+
 	// ------------------------------------------------------------------------
 	//  Suite of Tests
 	//
@@ -146,7 +148,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 	/**
 	 * Test that ensures the KafkaConsumer properly fails if the topic doesn't exist
-	 * and a wrong broker was specified
+	 * and a wrong broker was specified.
 	 *
 	 * @throws Exception
 	 */
@@ -173,8 +175,8 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			DataStream<String> stream = see.addSource(source);
 			stream.print();
 			see.execute("No broker test");
-		} catch(JobExecutionException jee) {
-			if(kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10")) {
+		} catch (JobExecutionException jee) {
+			if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10")) {
 				assertTrue(jee.getCause() instanceof TimeoutException);
 
 				TimeoutException te = (TimeoutException) jee.getCause();
@@ -191,7 +193,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	}
 
 	/**
-	 * Ensures that the committed offsets to Kafka are the offsets of "the next record to process"
+	 * Ensures that the committed offsets to Kafka are the offsets of "the next record to process".
 	 */
 	public void runCommitOffsetsToKafka() throws Exception {
 		// 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
@@ -354,13 +356,13 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	 * This test ensures that when the consumers retrieve some start offset from kafka (earliest, latest), that this offset
 	 * is committed to Kafka, even if some partitions are not read.
 	 *
-	 * Test:
+	 * <p>Test:
 	 * - Create 3 partitions
 	 * - write 50 messages into each.
 	 * - Start three consumers with auto.offset.reset='latest' and wait until they committed into Kafka.
 	 * - Check if the offsets in Kafka are set to 50 for the three partitions
 	 *
-	 * See FLINK-3440 as well
+	 * <p>See FLINK-3440 as well
 	 */
 	public void runAutoOffsetRetrievalAndCommitToKafka() throws Exception {
 		// 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
@@ -518,7 +520,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 		env
 			.addSource(latestReadingConsumer).setParallelism(parallelism)
-			.flatMap(new FlatMapFunction<Tuple2<Integer,Integer>, Object>() {
+			.flatMap(new FlatMapFunction<Tuple2<Integer, Integer>, Object>() {
 				@Override
 				public void flatMap(Tuple2<Integer, Integer> value, Collector<Object> out) throws Exception {
 					if (value.f1 - recordsInEachPartition < 0) {
@@ -605,12 +607,12 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	 * This test ensures that the consumer correctly uses group offsets in Kafka, and defaults to "auto.offset.reset"
 	 * behaviour when necessary, when explicitly configured to start from group offsets.
 	 *
-	 * The partitions and their committed group offsets are setup as:
+	 * <p>The partitions and their committed group offsets are setup as:
 	 * 	partition 0 --> committed offset 23
 	 * 	partition 1 --> no commit offset
 	 * 	partition 2 --> committed offset 43
 	 *
-	 * When configured to start from group offsets, each partition should read:
+	 * <p>When configured to start from group offsets, each partition should read:
 	 * 	partition 0 --> start from offset 23, read to offset 49 (27 records)
 	 * 	partition 1 --> default to "auto.offset.reset" (set to earliest), so start from offset 0, read to offset 49 (50 records)
 	 * 	partition 2 --> start from offset 43, read to offset 49 (7 records)
@@ -653,20 +655,20 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	 * start from specific offsets. For partitions which a specific offset can not be found for, the starting position
 	 * for them should fallback to the group offsets behaviour.
 	 *
-	 * 4 partitions will have 50 records with offsets 0 to 49. The supplied specific offsets map is:
+	 * <p>4 partitions will have 50 records with offsets 0 to 49. The supplied specific offsets map is:
 	 * 	partition 0 --> start from offset 19
 	 * 	partition 1 --> not set
 	 * 	partition 2 --> start from offset 22
 	 * 	partition 3 --> not set
 	 * 	partition 4 --> start from offset 26 (this should be ignored because the partition does not exist)
 	 *
-	 * The partitions and their committed group offsets are setup as:
+	 * <p>The partitions and their committed group offsets are setup as:
 	 * 	partition 0 --> committed offset 23
 	 * 	partition 1 --> committed offset 31
 	 * 	partition 2 --> committed offset 43
 	 * 	partition 3 --> no commit offset
 	 *
-	 * When configured to start from these specific offsets, each partition should read:
+	 * <p>When configured to start from these specific offsets, each partition should read:
 	 * 	partition 0 --> start from offset 19, read to offset 49 (31 records)
 	 * 	partition 1 --> fallback to group offsets, so start from offset 31, read to offset 49 (19 records)
 	 * 	partition 2 --> start from offset 22, read to offset 49 (28 records)
@@ -711,7 +713,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		kafkaOffsetHandler.close();
 		deleteTestTopic(topicName);
 	}
-	
+
 	/**
 	 * Ensure Kafka is working on both producer and consumer side.
 	 * This executes a job that contains two Flink pipelines.
@@ -719,22 +721,22 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	 * <pre>
 	 * (generator source) --> (kafka sink)-[KAFKA-TOPIC]-(kafka source) --> (validating sink)
 	 * </pre>
-	 * 
-	 * We need to externally retry this test. We cannot let Flink's retry mechanism do it, because the Kafka producer
+	 *
+	 * <p>We need to externally retry this test. We cannot let Flink's retry mechanism do it, because the Kafka producer
 	 * does not guarantee exactly-once output. Hence a recovery would introduce duplicates that
 	 * cause the test to fail.
 	 *
-	 * This test also ensures that FLINK-3156 doesn't happen again:
+	 * <p>This test also ensures that FLINK-3156 doesn't happen again:
 	 *
-	 * The following situation caused a NPE in the FlinkKafkaConsumer
+	 * <p>The following situation caused a NPE in the FlinkKafkaConsumer
 	 *
-	 * topic-1 <-- elements are only produced into topic1.
+	 * <p>topic-1 <-- elements are only produced into topic1.
 	 * topic-2
 	 *
-	 * Therefore, this test is consuming as well from an empty topic.
+	 * <p>Therefore, this test also consumes from an empty topic.
 	 *
 	 */
-	@RetryOnException(times=2, exception=kafka.common.NotLeaderForPartitionException.class)
+	@RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
 	public void runSimpleConcurrentProducerConsumerTopology() throws Exception {
 		final String topic = "concurrentProducerConsumerTopic_" + UUID.randomUUID().toString();
 		final String additionalEmptyTopic = "additionalEmptyTopic_" + UUID.randomUUID().toString();
@@ -763,7 +765,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 		// ----------- add producer dataflow ----------
 
-		DataStream<Tuple2<Long, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple2<Long,String>>() {
+		DataStream<Tuple2<Long, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple2<Long, String>>() {
 
 			private boolean running = true;
 
@@ -772,7 +774,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
 				int limit = cnt + elementsPerPartition;
 
-
 				while (running && cnt < limit) {
 					ctx.collect(new Tuple2<>(1000L + cnt, "kafka-" + cnt));
 					cnt++;
@@ -1002,13 +1003,11 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		FailingIdentityMapper.failedBefore = false;
 		tryExecute(env, "multi-source-one-partitions exactly once test");
 
-
 		deleteTestTopic(topic);
 	}
-	
-	
+
 	/**
-	 * Tests that the source can be properly canceled when reading full partitions. 
+	 * Tests that the source can be properly canceled when reading full partitions.
 	 */
 	public void runCancelingOnFullInputTest() throws Exception {
 		final String topic = "cancelingOnFullTopic";
@@ -1056,7 +1055,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		Thread.sleep(2000);
 
 		Throwable failueCause = jobError.get();
-		if(failueCause != null) {
+		if (failueCause != null) {
 			failueCause.printStackTrace();
 			Assert.fail("Test failed prematurely with: " + failueCause.getMessage());
 		}
@@ -1089,7 +1088,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	}
 
 	/**
-	 * Tests that the source can be properly canceled when reading empty partitions. 
+	 * Tests that the source can be properly canceled when reading empty partitions.
 	 */
 	public void runCancelingOnEmptyInputTest() throws Exception {
 		final String topic = "cancelingOnEmptyInputTopic";
@@ -1149,7 +1148,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	}
 
 	/**
-	 * Tests that the source can be properly canceled when reading full partitions. 
+	 * Tests that the source can be properly canceled when reading full partitions.
 	 */
 	public void runFailOnDeployTest() throws Exception {
 		final String topic = "failOnDeployTopic";
@@ -1198,19 +1197,19 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	}
 
 	/**
-	 * Test producing and consuming into multiple topics
+	 * Test producing and consuming into multiple topics.
 	 * @throws Exception
 	 */
 	public void runProduceConsumeMultipleTopics() throws Exception {
-		final int NUM_TOPICS = 5;
-		final int NUM_ELEMENTS = 20;
+		final int numTopics = 5;
+		final int numElements = 20;
 
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
 		env.getConfig().disableSysoutLogging();
-		
+
 		// create topics with content
 		final List<String> topics = new ArrayList<>();
-		for (int i = 0; i < NUM_TOPICS; i++) {
+		for (int i = 0; i < numTopics; i++) {
 			final String topic = "topic-" + i;
 			topics.add(topic);
 			// create topic
@@ -1227,8 +1226,8 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			public void run(SourceContext<Tuple3<Integer, Integer, String>> ctx) throws Exception {
 				int partition = getRuntimeContext().getIndexOfThisSubtask();
 
-				for (int topicId = 0; topicId < NUM_TOPICS; topicId++) {
-					for (int i = 0; i < NUM_ELEMENTS; i++) {
+				for (int topicId = 0; topicId < numTopics; topicId++) {
+					for (int i = 0; i < numElements; i++) {
 						ctx.collect(new Tuple3<>(partition, i, "topic-" + topicId));
 					}
 				}
@@ -1255,7 +1254,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		stream = env.addSource(kafkaServer.getConsumer(topics, schema, props));
 
 		stream.flatMap(new FlatMapFunction<Tuple3<Integer, Integer, String>, Integer>() {
-			Map<String, Integer> countPerTopic = new HashMap<>(NUM_TOPICS);
+			Map<String, Integer> countPerTopic = new HashMap<>(numTopics);
 			@Override
 			public void flatMap(Tuple3<Integer, Integer, String> value, Collector<Integer> out) throws Exception {
 				Integer count = countPerTopic.get(value.f2);
@@ -1268,10 +1267,10 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 				// check map:
 				for (Map.Entry<String, Integer> el: countPerTopic.entrySet()) {
-					if (el.getValue() < NUM_ELEMENTS) {
+					if (el.getValue() < numElements) {
 						break; // not enough yet
 					}
-					if (el.getValue() > NUM_ELEMENTS) {
+					if (el.getValue() > numElements) {
 						throw new RuntimeException("There is a failure in the test. I've read " +
 								el.getValue() + " from topic " + el.getKey());
 					}
@@ -1283,17 +1282,17 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 		tryExecute(env, "Count elements from the topics");
 
-
 		// delete all topics again
-		for (int i = 0; i < NUM_TOPICS; i++) {
+		for (int i = 0; i < numTopics; i++) {
 			final String topic = "topic-" + i;
 			deleteTestTopic(topic);
 		}
 	}
 
 	/**
-	 * Test Flink's Kafka integration also with very big records (30MB)
-	 * see http://stackoverflow.com/questions/21020347/kafka-sending-a-15mb-message
+	 * Test Flink's Kafka integration also with very big records (30MB).
+	 *
+	 * <p>see http://stackoverflow.com/questions/21020347/kafka-sending-a-15mb-message
 	 *
 	 */
 	public void runBigRecordTestTopology() throws Exception {
@@ -1337,11 +1336,11 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 					if (elCnt == 11) {
 						throw new SuccessException();
 					} else {
-						throw new RuntimeException("There have been "+elCnt+" elements");
+						throw new RuntimeException("There have been " + elCnt + " elements");
 					}
 				}
 				if (elCnt > 10) {
-					throw new RuntimeException("More than 10 elements seen: "+elCnt);
+					throw new RuntimeException("More than 10 elements seen: " + elCnt);
 				}
 			}
 		});
@@ -1395,7 +1394,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		deleteTestTopic(topic);
 	}
 
-	
 	public void runBrokerFailureTest() throws Exception {
 		final String topic = "brokerFailureTestTopic";
 
@@ -1404,7 +1402,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		final int totalElements = parallelism * numElementsPerPartition;
 		final int failAfterElements = numElementsPerPartition / 3;
 
-
 		createTestTopic(topic, parallelism, 2);
 
 		DataGenerators.generateRandomizedIntegerSequence(
@@ -1417,7 +1414,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 
 		LOG.info("Leader to shutdown {}", leaderId);
 
-
 		// run the topology (the consumers must handle the failures)
 
 		DeserializationSchema<Integer> schema =
@@ -1450,7 +1446,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	public void runKeyValueTest() throws Exception {
 		final String topic = "keyvaluetest";
 		createTestTopic(topic, 1, 1);
-		final int ELEMENT_COUNT = 5000;
+		final int elementCount = 5000;
 
 		// ----------- Write some data into Kafka -------------------
 
@@ -1463,7 +1459,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			@Override
 			public void run(SourceContext<Tuple2<Long, PojoValue>> ctx) throws Exception {
 				Random rnd = new Random(1337);
-				for (long i = 0; i < ELEMENT_COUNT; i++) {
+				for (long i = 0; i < elementCount; i++) {
 					PojoValue pojo = new PojoValue();
 					pojo.when = new Date(rnd.nextLong());
 					pojo.lon = rnd.nextLong();
@@ -1473,6 +1469,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 					ctx.collect(new Tuple2<>(key, pojo));
 				}
 			}
+
 			@Override
 			public void cancel() {
 			}
@@ -1491,26 +1488,25 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		env.setRestartStrategy(RestartStrategies.noRestart());
 		env.getConfig().disableSysoutLogging();
 
-
 		KeyedDeserializationSchema<Tuple2<Long, PojoValue>> readSchema = new TypeInformationKeyValueSerializationSchema<>(Long.class, PojoValue.class, env.getConfig());
 
 		Properties props = new Properties();
 		props.putAll(standardProps);
 		props.putAll(secureProps);
 		DataStream<Tuple2<Long, PojoValue>> fromKafka = env.addSource(kafkaServer.getConsumer(topic, readSchema, props));
-		fromKafka.flatMap(new RichFlatMapFunction<Tuple2<Long,PojoValue>, Object>() {
+		fromKafka.flatMap(new RichFlatMapFunction<Tuple2<Long, PojoValue>, Object>() {
 			long counter = 0;
 			@Override
 			public void flatMap(Tuple2<Long, PojoValue> value, Collector<Object> out) throws Exception {
 				// the elements should be in order.
-				Assert.assertTrue("Wrong value " + value.f1.lat, value.f1.lat == counter );
+				Assert.assertTrue("Wrong value " + value.f1.lat, value.f1.lat == counter);
 				if (value.f1.lat % 2 == 0) {
 					assertNull("key was not null", value.f0);
 				} else {
 					Assert.assertTrue("Wrong value " + value.f0, value.f0 == counter);
 				}
 				counter++;
-				if (counter == ELEMENT_COUNT) {
+				if (counter == elementCount) {
 					// we got the right number of elements
 					throw new SuccessException();
 				}
@@ -1522,22 +1518,21 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		deleteTestTopic(topic);
 	}
 
-	public static class PojoValue {
+	private static class PojoValue {
 		public Date when;
 		public long lon;
 		public long lat;
 		public PojoValue() {}
 	}
 
-
 	/**
-	 * Test delete behavior and metrics for producer
+	 * Test delete behavior and metrics for producer.
 	 * @throws Exception
 	 */
 	public void runAllDeletesTest() throws Exception {
 		final String topic = "alldeletestest";
 		createTestTopic(topic, 1, 1);
-		final int ELEMENT_COUNT = 300;
+		final int elementCount = 300;
 
 		// ----------- Write some data into Kafka -------------------
 
@@ -1550,12 +1545,13 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			@Override
 			public void run(SourceContext<Tuple2<byte[], PojoValue>> ctx) throws Exception {
 				Random rnd = new Random(1337);
-				for (long i = 0; i < ELEMENT_COUNT; i++) {
+				for (long i = 0; i < elementCount; i++) {
 					final byte[] key = new byte[200];
 					rnd.nextBytes(key);
 					ctx.collect(new Tuple2<>(key, (PojoValue) null));
 				}
 			}
+
 			@Override
 			public void cancel() {
 			}
@@ -1589,7 +1585,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				// ensure that deleted messages are passed as nulls
 				assertNull(value.f1);
 				counter++;
-				if (counter == ELEMENT_COUNT) {
+				if (counter == elementCount) {
 					// we got the right number of elements
 					throw new SuccessException();
 				}
@@ -1608,8 +1604,8 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 	 */
 	public void runEndOfStreamTest() throws Exception {
 
-		final int ELEMENT_COUNT = 300;
-		final String topic = writeSequence("testEndOfStream", ELEMENT_COUNT, 1, 1);
+		final int elementCount = 300;
+		final String topic = writeSequence("testEndOfStream", elementCount, 1, 1);
 
 		// read using custom schema
 		final StreamExecutionEnvironment env1 = StreamExecutionEnvironment.getExecutionEnvironment();
@@ -1621,21 +1617,21 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		props.putAll(standardProps);
 		props.putAll(secureProps);
 
-		DataStream<Tuple2<Integer, Integer>> fromKafka = env1.addSource(kafkaServer.getConsumer(topic, new FixedNumberDeserializationSchema(ELEMENT_COUNT), props));
-		fromKafka.flatMap(new FlatMapFunction<Tuple2<Integer,Integer>, Void>() {
+		DataStream<Tuple2<Integer, Integer>> fromKafka = env1.addSource(kafkaServer.getConsumer(topic, new FixedNumberDeserializationSchema(elementCount), props));
+		fromKafka.flatMap(new FlatMapFunction<Tuple2<Integer, Integer>, Void>() {
 			@Override
 			public void flatMap(Tuple2<Integer, Integer> value, Collector<Void> out) throws Exception {
 				// noop ;)
 			}
 		});
 
-		JobExecutionResult result = tryExecute(env1, "Consume " + ELEMENT_COUNT + " elements from Kafka");
+		JobExecutionResult result = tryExecute(env1, "Consume " + elementCount + " elements from Kafka");
 
 		deleteTestTopic(topic);
 	}
 
 	/**
-	 * Test metrics reporting for consumer
+	 * Test metrics reporting for consumer.
 	 *
 	 * @throws Exception
 	 */
@@ -1690,9 +1686,9 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 					kafkaServer.produceIntoKafka(fromGen, topic, new KeyedSerializationSchemaWrapper<>(schema), standardProps, null);
 
 					env1.execute("Metrics test job");
-				} catch(Throwable t) {
+				} catch (Throwable t) {
 					LOG.warn("Got exception during execution", t);
-					if(!(t instanceof JobCancellationException)) { // we'll cancel the job
+					if (!(t instanceof JobCancellationException)) { // we'll cancel the job
 						error.f0 = t;
 					}
 				}
@@ -1723,7 +1719,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				// check that offsets are correctly reported
 				for (ObjectName object : offsetMetrics) {
 					Object offset = mBeanServer.getAttribute(object, "Value");
-					if((long) offset >= 0) {
+					if ((long) offset >= 0) {
 						numPosOffsets++;
 					}
 				}
@@ -1738,7 +1734,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			Set<ObjectName> producerMetrics = mBeanServer.queryNames(new ObjectName("*KafkaProducer*:*"), null);
 			Assert.assertTrue("No producer metrics found", producerMetrics.size() > 30);
 
-
 			LOG.info("Found all JMX metrics. Cancelling job.");
 		} finally {
 			// cancel
@@ -1755,12 +1750,11 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		deleteTestTopic(topic);
 	}
 
+	private static class FixedNumberDeserializationSchema implements DeserializationSchema<Tuple2<Integer, Integer>> {
 
-	public static class FixedNumberDeserializationSchema implements DeserializationSchema<Tuple2<Integer, Integer>> {
-		
 		final int finalCount;
 		int count = 0;
-		
+
 		TypeInformation<Tuple2<Integer, Integer>> ti = TypeInfoParser.parse("Tuple2<Integer, Integer>");
 		TypeSerializer<Tuple2<Integer, Integer>> ser = ti.createSerializer(new ExecutionConfig());
 
@@ -1785,7 +1779,6 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		}
 	}
 
-
 	// ------------------------------------------------------------------------
 	//  Reading writing test data sets
 	// ------------------------------------------------------------------------
@@ -1916,13 +1909,12 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			String baseTopicName,
 			final int numElements,
 			final int parallelism,
-			final int replicationFactor) throws Exception
-	{
+			final int replicationFactor) throws Exception {
 		LOG.info("\n===================================\n" +
 				"== Writing sequence of " + numElements + " into " + baseTopicName + " with p=" + parallelism + "\n" +
 				"===================================");
 
-		final TypeInformation<Tuple2<Integer, Integer>> resultType = 
+		final TypeInformation<Tuple2<Integer, Integer>> resultType =
 				TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});
 
 		final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema =
@@ -1932,15 +1924,15 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		final KeyedDeserializationSchema<Tuple2<Integer, Integer>> deserSchema =
 				new KeyedDeserializationSchemaWrapper<>(
 						new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));
-		
+
 		final int maxNumAttempts = 10;
 
 		for (int attempt = 1; attempt <= maxNumAttempts; attempt++) {
-			
+
 			final String topicName = baseTopicName + '-' + attempt;
-			
+
 			LOG.info("Writing attempt #" + attempt);
-			
+
 			// -------- Write the Sequence --------
 
 			createTestTopic(topicName, parallelism, replicationFactor);
@@ -1948,33 +1940,33 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
 			writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
 			writeEnv.getConfig().disableSysoutLogging();
-			
+
 			DataStream<Tuple2<Integer, Integer>> stream = writeEnv.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {
-	
+
 				private boolean running = true;
-	
+
 				@Override
 				public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
 					int cnt = 0;
 					int partition = getRuntimeContext().getIndexOfThisSubtask();
-	
+
 					while (running && cnt < numElements) {
 						ctx.collect(new Tuple2<>(partition, cnt));
 						cnt++;
 					}
 				}
-	
+
 				@Override
 				public void cancel() {
 					running = false;
 				}
 			}).setParallelism(parallelism);
-	
+
 			// the producer must not produce duplicates
 			Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
 			producerProperties.setProperty("retries", "0");
 			producerProperties.putAll(secureProps);
-			
+
 			kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties, new Tuple2FlinkPartitioner(parallelism))
 					.setParallelism(parallelism);
 
@@ -1987,21 +1979,21 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
 				continue;
 			}
-			
+
 			LOG.info("Finished writing sequence");
 
 			// -------- Validate the Sequence --------
-			
+
 			// we need to validate the sequence, because kafka's producers are not exactly once
 			LOG.info("Validating sequence");
 
 			JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
-			
+
 			final StreamExecutionEnvironment readEnv = StreamExecutionEnvironment.getExecutionEnvironment();
 			readEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
 			readEnv.getConfig().disableSysoutLogging();
 			readEnv.setParallelism(parallelism);
-			
+
 			Properties readProps = (Properties) standardProps.clone();
 			readProps.setProperty("group.id", "flink-tests-validator");
 			readProps.putAll(secureProps);
@@ -2010,10 +2002,10 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			readEnv
 					.addSource(consumer)
 					.map(new RichMapFunction<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>() {
-						
+
 						private final int totalCount = parallelism * numElements;
 						private int count = 0;
-						
+
 						@Override
 						public Tuple2<Integer, Integer> map(Tuple2<Integer, Integer> value) throws Exception {
 							if (++count == totalCount) {
@@ -2024,9 +2016,9 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 						}
 					}).setParallelism(1)
 					.addSink(new DiscardingSink<Tuple2<Integer, Integer>>()).setParallelism(1);
-			
+
 			final AtomicReference<Throwable> errorRef = new AtomicReference<>();
-			
+
 			Thread runner = new Thread() {
 				@Override
 				public void run() {
@@ -2038,15 +2030,15 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				}
 			};
 			runner.start();
-			
+
 			final long deadline = System.nanoTime() + 10_000_000_000L;
 			long delay;
 			while (runner.isAlive() && (delay = deadline - System.nanoTime()) > 0) {
-				runner.join(delay/1_000_000L);
+				runner.join(delay / 1_000_000L);
 			}
-			
+
 			boolean success;
-			
+
 			if (runner.isAlive()) {
 				// did not finish in time, maybe the producer dropped one or more records and
 				// the validation did not reach the exit point
@@ -2065,7 +2057,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			}
 
 			JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
-			
+
 			if (success) {
 				// everything is good!
 				return topicName;
@@ -2075,7 +2067,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 				// fall through the loop
 			}
 		}
-		
+
 		throw new Exception("Could not write a valid sequence to Kafka after " + maxNumAttempts + " attempts");
 	}
 
@@ -2090,28 +2082,28 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
 		// we request only one stream per consumer instance. Kafka will make sure that each consumer group
 		// will see each message only once.
-		Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
+		Map<String, Integer> topicCountMap = Collections.singletonMap(topicName, 1);
 		Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
 		if (streams.size() != 1) {
-			throw new RuntimeException("Expected only one message stream but got "+streams.size());
+			throw new RuntimeException("Expected only one message stream but got " + streams.size());
 		}
 		List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
 		if (kafkaStreams == null) {
-			throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
+			throw new RuntimeException("Requested stream not available. Available streams: " + streams.toString());
 		}
 		if (kafkaStreams.size() != 1) {
-			throw new RuntimeException("Requested 1 stream from Kafka, bot got "+kafkaStreams.size()+" streams");
+			throw new RuntimeException("Requested 1 stream from Kafka, but got " + kafkaStreams.size() + " streams");
 		}
 		LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
 		ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();
 
 		List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
 		int read = 0;
-		while(iteratorToRead.hasNext()) {
+		while (iteratorToRead.hasNext()) {
 			read++;
 			result.add(iteratorToRead.next());
 			if (read == stopAfter) {
-				LOG.info("Read "+read+" elements");
+				LOG.info("Read " + read + " elements");
 				return result;
 			}
 		}
@@ -2131,12 +2123,11 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		}
 	}
 
-	private static void printTopic(String topicName, int elements,DeserializationSchema<?> deserializer) 
-			throws IOException
-	{
+	private static void printTopic(String topicName, int elements, DeserializationSchema<?> deserializer)
+			throws IOException {
 		// write the sequence to log for debugging purposes
 		Properties newProps = new Properties(standardProps);
-		newProps.setProperty("group.id", "topic-printer"+ UUID.randomUUID().toString());
+		newProps.setProperty("group.id", "topic-printer" + UUID.randomUUID().toString());
 		newProps.setProperty("auto.offset.reset", "smallest");
 		newProps.setProperty("zookeeper.connect", standardProps.getProperty("zookeeper.connect"));
 		newProps.putAll(secureProps);
@@ -2145,15 +2136,14 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		printTopic(topicName, printerConfig, deserializer, elements);
 	}
 
-
-	public static class BrokerKillingMapper<T> extends RichMapFunction<T,T>
+	private static class BrokerKillingMapper<T> extends RichMapFunction<T, T>
 			implements ListCheckpointed<Integer>, CheckpointListener {
 
 		private static final long serialVersionUID = 6334389850158707313L;
 
 		public static volatile boolean killedLeaderBefore;
 		public static volatile boolean hasBeenCheckpointedBeforeFailure;
-		
+
 		private final int shutdownBrokerId;
 		private final int failCount;
 		private int numElementsTotal;
@@ -2174,10 +2164,10 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		@Override
 		public T map(T value) throws Exception {
 			numElementsTotal++;
-			
+
 			if (!killedLeaderBefore) {
 				Thread.sleep(10);
-				
+
 				if (failer && numElementsTotal >= failCount) {
 					// shut down a Kafka broker
 					KafkaServer toShutDown = null;
@@ -2188,14 +2178,14 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 							break;
 						}
 					}
-	
+
 					if (toShutDown == null) {
 						StringBuilder listOfBrokers = new StringBuilder();
 						for (KafkaServer server : kafkaServer.getBrokers()) {
 							listOfBrokers.append(kafkaServer.getBrokerId(server));
 							listOfBrokers.append(" ; ");
 						}
-						
+
 						throw new Exception("Cannot find broker to shut down: " + shutdownBrokerId
 								+ " ; available brokers: " + listOfBrokers.toString());
 					}
@@ -2266,7 +2256,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 			try {
 				ts.serialize(new Tuple2<>(element.f0, element.f1), out);
 			} catch (IOException e) {
-				throw new RuntimeException("Error" ,e);
+				throw new RuntimeException("Error", e);
 			}
 			return by.toByteArray();
 		}

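The javadocs above describe how consumer start positions interact with committed group offsets and auto.offset.reset. A hedged sketch of configuring those start positions follows; the FlinkKafkaConsumer010 constructor, KafkaTopicPartition, and setStartFromSpecificOffsets() are assumed from the 1.3 connector API rather than taken from this commit, and the topic name and offset values are illustrative.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class KafkaStartPositionSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092");
		props.setProperty("group.id", "start-position-demo");

		FlinkKafkaConsumer010<String> consumer =
				new FlinkKafkaConsumer010<>("test-topic", new SimpleStringSchema(), props);

		// Start from explicitly supplied offsets; partitions without an entry
		// fall back to the group-offset / auto.offset.reset behaviour, which is
		// what runStartFromSpecificOffsets() above verifies.
		Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
		specificOffsets.put(new KafkaTopicPartition("test-topic", 0), 19L);
		specificOffsets.put(new KafkaTopicPartition("test-topic", 2), 22L);
		consumer.setStartFromSpecificOffsets(specificOffsets);

		DataStream<String> stream = env.addSource(consumer);
		stream.print();

		env.execute("Kafka start position sketch");
	}
}
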
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java
index bcc8328..e292e13 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java
@@ -43,6 +43,9 @@ import static org.apache.flink.test.util.TestUtils.tryExecute;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+/**
+ * Abstract test base for all Kafka producer tests.
+ */
 @SuppressWarnings("serial")
 public abstract class KafkaProducerTestBase extends KafkaTestBase {
 
@@ -50,7 +53,7 @@ public abstract class KafkaProducerTestBase extends KafkaTestBase {
 	 * This tests verifies that custom partitioning works correctly, with a default topic
 	 * and dynamic topic. The number of partitions for each topic is deliberately different.
 	 *
-	 * Test topology:
+	 * <p>Test topology:
 	 *
 	 * <pre>
 	 *             +------> (sink) --+--> [DEFAULT_TOPIC-1] --> (source) -> (map) -----+
@@ -67,11 +70,11 @@ public abstract class KafkaProducerTestBase extends KafkaTestBase {
 	 *            \                  |                             |          |        |
 	 *             +------> (sink) --+--> [DYNAMIC_TOPIC-3] --> (source) -> (map) -----+
 	 * </pre>
-	 * 
-	 * Each topic has an independent mapper that validates the values come consistently from
+	 *
+	 * <p>Each topic has an independent mapper that validates the values come consistently from
 	 * the correct Kafka partition of the topic it is responsible for.
-	 * 
-	 * Each topic also has a final sink that validates that there are no duplicates and that all
+	 *
+	 * <p>Each topic also has a final sink that validates that there are no duplicates and that all
 	 * partitions are present.
 	 */
 	public void runCustomPartitioningTest() {
@@ -171,7 +174,7 @@ public abstract class KafkaProducerTestBase extends KafkaTestBase {
 
 	// ------------------------------------------------------------------------
 
-	public static class CustomPartitioner extends FlinkKafkaPartitioner<Tuple2<Long, String>> implements Serializable {
+	private static class CustomPartitioner extends FlinkKafkaPartitioner<Tuple2<Long, String>> implements Serializable {
 
 		private final Map<String, Integer> expectedTopicsToNumPartitions;
 

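For readers following the custom-partitioning test described in the javadoc above, a minimal sketch of a partitioner honouring the FlinkKafkaPartitioner contract (the five-argument partition method that also appears in Tuple2FlinkPartitioner later in this commit) is shown below. The class name and the modulo routing are illustrative assumptions; this is not the test's actual CustomPartitioner.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

/**
 * Illustrative partitioner that routes each record to a partition derived from its long key.
 */
public class ModuloPartitioner extends FlinkKafkaPartitioner<Tuple2<Long, String>> {

	private static final long serialVersionUID = 1L;

	@Override
	public int partition(Tuple2<Long, String> record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
		// 'partitions' holds the partition ids of targetTopic; pick one deterministically from the key.
		int index = (int) Math.floorMod(record.f0, (long) partitions.length);
		return partitions[index];
	}
}
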
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java
index f688660..d5c9276 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java
@@ -37,7 +37,6 @@ import org.apache.flink.util.InstantiationUtil;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
@@ -58,7 +57,7 @@ import static org.junit.Assert.fail;
  */
 @SuppressWarnings("serial")
 public class KafkaShortRetentionTestBase implements Serializable {
-	
+
 	protected static final Logger LOG = LoggerFactory.getLogger(KafkaShortRetentionTestBase.class);
 
 	protected static final int NUM_TMS = 1;
@@ -66,7 +65,7 @@ public class KafkaShortRetentionTestBase implements Serializable {
 	protected static final int TM_SLOTS = 8;
 
 	protected static final int PARALLELISM = NUM_TMS * TM_SLOTS;
-	
+
 	private static KafkaTestEnvironment kafkaServer;
 	private static Properties standardProps;
 	private static LocalFlinkMiniCluster flink;
@@ -90,7 +89,7 @@ public class KafkaShortRetentionTestBase implements Serializable {
 
 		LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());
 
-		if(kafkaServer.isSecureRunSupported()) {
+		if (kafkaServer.isSecureRunSupported()) {
 			secureProps = kafkaServer.getSecureProperties();
 		}
 
@@ -151,10 +150,8 @@ public class KafkaShortRetentionTestBase implements Serializable {
 		env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
 		env.getConfig().disableSysoutLogging();
 
-
 		// ----------- add producer dataflow ----------
 
-
 		DataStream<String> stream = env.addSource(new RichParallelSourceFunction<String>() {
 
 			private boolean running = true;
@@ -164,7 +161,6 @@ public class KafkaShortRetentionTestBase implements Serializable {
 				int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
 				int limit = cnt + elementsPerPartition;
 
-
 				while (running && !stopProducer && cnt < limit) {
 					ctx.collect("element-" + cnt);
 					cnt++;
@@ -196,14 +192,13 @@ public class KafkaShortRetentionTestBase implements Serializable {
 		kafkaServer.deleteTestTopic(topic);
 	}
 
-	
 	private class NonContinousOffsetsDeserializationSchema implements KeyedDeserializationSchema<String> {
 		private int numJumps;
 		long nextExpected = 0;
 
 		@Override
 		public String deserialize(byte[] messageKey, byte[] message, String topic, int partition, long offset) throws IOException {
-			if(offset != nextExpected) {
+			if (offset != nextExpected) {
 				numJumps++;
 				nextExpected = offset;
 				LOG.info("Registered now jump at offset {}", offset);
@@ -219,7 +214,7 @@ public class KafkaShortRetentionTestBase implements Serializable {
 
 		@Override
 		public boolean isEndOfStream(String nextElement) {
-			if( numJumps >= 5) {
+			if (numJumps >= 5) {
 				// we saw 5 jumps and no failures --> consumer can handle auto.offset.reset
 				stopProducer = true;
 				return true;
@@ -233,15 +228,14 @@ public class KafkaShortRetentionTestBase implements Serializable {
 		}
 	}
 
-
 	/**
-	 * Ensure that the consumer is properly failing if "auto.offset.reset" is set to "none"
+	 * Ensure that the consumer is properly failing if "auto.offset.reset" is set to "none".
 	 * @throws Exception
 	 */
 	public void runFailOnAutoOffsetResetNone() throws Exception {
 		final String topic = "auto-offset-reset-none-test";
 		final int parallelism = 1;
-		
+
 		kafkaServer.createTestTopic(topic, parallelism, 1);
 
 		final StreamExecutionEnvironment env =
@@ -249,7 +243,7 @@ public class KafkaShortRetentionTestBase implements Serializable {
 		env.setParallelism(parallelism);
 		env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
 		env.getConfig().disableSysoutLogging();
-		
+
 		// ----------- add consumer ----------
 
 		Properties customProps = new Properties();
@@ -263,10 +257,10 @@ public class KafkaShortRetentionTestBase implements Serializable {
 
 		try {
 			env.execute("Test auto offset reset none");
-		} catch(Throwable e) {
+		} catch (Throwable e) {
 			// check if correct exception has been thrown
-			if(!e.getCause().getCause().getMessage().contains("Unable to find previous offset")  // kafka 0.8
-			 && !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition") // kafka 0.9
+			if (!e.getCause().getCause().getMessage().contains("Unable to find previous offset")  // kafka 0.8
+				&& !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition") // kafka 0.9
 					) {
 				throw e;
 			}
@@ -287,7 +281,7 @@ public class KafkaShortRetentionTestBase implements Serializable {
 		customProps.putAll(standardProps);
 		customProps.putAll(secureProps);
 		customProps.setProperty("auto.offset.reset", "none"); // test that "none" leads to an exception
-		
+
 		try {
 			kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);
 			fail("should fail with an exception");

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
index d4fe9cc..dcf3167 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
@@ -15,9 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
+package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
@@ -27,8 +26,11 @@ import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWra
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
 import org.apache.flink.table.api.Types;
 import org.apache.flink.types.Row;
+
 import org.junit.Test;
 
+import java.util.Properties;
+
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotSame;
@@ -38,6 +40,9 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
+/**
+ * Abstract test base for all Kafka table sink tests.
+ */
 public abstract class KafkaTableSinkTestBase {
 
 	private static final String TOPIC = "testTopic";
@@ -46,7 +51,7 @@ public abstract class KafkaTableSinkTestBase {
 	private static final FlinkKafkaPartitioner<Row> PARTITIONER = new CustomPartitioner();
 	private static final Properties PROPERTIES = createSinkProperties();
 	@SuppressWarnings("unchecked")
-	private final FlinkKafkaProducerBase<Row> PRODUCER = new FlinkKafkaProducerBase<Row>(
+	private final FlinkKafkaProducerBase<Row> producer = new FlinkKafkaProducerBase<Row>(
 		TOPIC, new KeyedSerializationSchemaWrapper(getSerializationSchema()), PROPERTIES, PARTITIONER) {
 
 		@Override
@@ -61,7 +66,7 @@ public abstract class KafkaTableSinkTestBase {
 		KafkaTableSink kafkaTableSink = spy(createTableSink());
 		kafkaTableSink.emitDataStream(dataStream);
 
-		verify(dataStream).addSink(eq(PRODUCER));
+		verify(dataStream).addSink(eq(producer));
 
 		verify(kafkaTableSink).createKafkaProducer(
 			eq(TOPIC),
@@ -87,7 +92,7 @@ public abstract class KafkaTableSinkTestBase {
 	protected abstract SerializationSchema<Row> getSerializationSchema();
 
 	private KafkaTableSink createTableSink() {
-		return createTableSink(TOPIC, PROPERTIES, PARTITIONER, PRODUCER);
+		return createTableSink(TOPIC, PROPERTIES, PARTITIONER, producer);
 	}
 
 	private static Properties createSinkProperties() {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceTestBase.java
index 341df45..8028bfc 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceTestBase.java
@@ -18,23 +18,29 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
-import org.apache.avro.Schema;
-import org.apache.avro.specific.SpecificRecordBase;
 import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.kafka.testutils.AvroTestUtils;
+import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.table.api.Types;
 import org.apache.flink.types.Row;
-import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+
+import org.apache.avro.Schema;
+import org.apache.avro.specific.SpecificRecordBase;
 import org.junit.Test;
+
+import java.util.Properties;
+
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
+/**
+ * Abstract test base for all Kafka table source tests.
+ */
 public abstract class KafkaTableSourceTestBase {
 
 	private static final String TOPIC = "testTopic";
@@ -47,10 +53,14 @@ public abstract class KafkaTableSourceTestBase {
 		BasicTypeInfo.LONG_TYPE_INFO };
 	private static final Properties PROPERTIES = createSourceProperties();
 
-	// Avro record that matches above schema
+	/**
+	 * Avro record that matches the above schema.
+	 */
 	public static class AvroSpecificRecord extends SpecificRecordBase {
 
+		//CHECKSTYLE.OFF: StaticVariableNameCheck - Avro accesses this field by name via reflection.
 		public static Schema SCHEMA$ = AvroTestUtils.createFlatAvroSchema(FIELD_NAMES, FIELD_TYPES);
+		//CHECKSTYLE.ON: StaticVariableNameCheck
 
 		public Long mylong;
 		public String mystring;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java
index 1837af6..c484a4b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java
@@ -33,17 +33,15 @@ import org.apache.flink.util.TestLogger;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import scala.concurrent.duration.FiniteDuration;
-
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
+import scala.concurrent.duration.FiniteDuration;
 
 /**
  * The base for the Kafka tests. It brings up:
@@ -52,7 +50,7 @@ import java.util.concurrent.TimeUnit;
  *     <li>Three Kafka Brokers (mini clusters)</li>
  *     <li>A Flink mini cluster</li>
  * </ul>
- * 
+ *
  * <p>Code in this test is based on the following GitHub repository:
  * <a href="https://github.com/sakserv/hadoop-mini-clusters">
  *   https://github.com/sakserv/hadoop-mini-clusters</a> (ASL licensed),
@@ -62,7 +60,7 @@ import java.util.concurrent.TimeUnit;
 public abstract class KafkaTestBase extends TestLogger {
 
 	protected static final Logger LOG = LoggerFactory.getLogger(KafkaTestBase.class);
-	
+
 	protected static final int NUMBER_OF_KAFKA_SERVERS = 3;
 
 	protected static final int NUM_TMS = 1;
@@ -74,7 +72,7 @@ public abstract class KafkaTestBase extends TestLogger {
 	protected static String brokerConnectionStrings;
 
 	protected static Properties standardProps;
-	
+
 	protected static LocalFlinkMiniCluster flink;
 
 	protected static FiniteDuration timeout = new FiniteDuration(10, TimeUnit.SECONDS);
@@ -89,7 +87,7 @@ public abstract class KafkaTestBase extends TestLogger {
 	// ------------------------------------------------------------------------
 	//  Setup and teardown of the mini clusters
 	// ------------------------------------------------------------------------
-	
+
 	@BeforeClass
 	public static void prepare() throws ClassNotFoundException {
 
@@ -162,7 +160,7 @@ public abstract class KafkaTestBase extends TestLogger {
 			flink.shutdown();
 		}
 
-		if(secureProps != null) {
+		if (secureProps != null) {
 			secureProps.clear();
 		}
 
@@ -170,12 +168,9 @@ public abstract class KafkaTestBase extends TestLogger {
 
 	}
 
-
-
 	// ------------------------------------------------------------------------
 	//  Execution utilities
 	// ------------------------------------------------------------------------
-	
 
 	protected static void tryExecutePropagateExceptions(StreamExecutionEnvironment see, String name) throws Exception {
 		try {
@@ -200,7 +195,7 @@ public abstract class KafkaTestBase extends TestLogger {
 	protected static void createTestTopic(String topic, int numberOfPartitions, int replicationFactor) {
 		kafkaServer.createTestTopic(topic, numberOfPartitions, replicationFactor);
 	}
-	
+
 	protected static void deleteTestTopic(String topic) {
 		kafkaServer.deleteTestTopic(topic);
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java
index 311a1a4..4df3465 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java
@@ -17,11 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
-
-import kafka.server.KafkaServer;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSink;
 import org.apache.flink.streaming.api.operators.StreamSink;
@@ -31,8 +26,14 @@ import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
 
+import kafka.server.KafkaServer;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+
 /**
- * Abstract class providing a Kafka test environment
+ * Abstract class providing a Kafka test environment.
  */
 public abstract class KafkaTestEnvironment {
 
@@ -89,9 +90,14 @@ public abstract class KafkaTestEnvironment {
 
 	// -- offset handlers
 
+	/**
+	 * Simple interface to commit and retrieve offsets.
+	 */
 	public interface KafkaOffsetHandler {
 		Long getCommittedOffset(String topicName, int partition);
+
 		void setCommittedOffset(String topicName, int partition, long offset);
+
 		void close();
 	}
 

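The KafkaOffsetHandler interface above only states a contract. A map-backed sketch of an implementation follows, purely for illustration; the real test environments delegate to the version-specific Kafka offset commit APIs instead of an in-memory map.

import org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironment;

import java.util.HashMap;
import java.util.Map;

/**
 * Illustrative in-memory implementation of the KafkaOffsetHandler contract.
 */
public class InMemoryOffsetHandler implements KafkaTestEnvironment.KafkaOffsetHandler {

	private final Map<String, Long> committedOffsets = new HashMap<>();

	@Override
	public Long getCommittedOffset(String topicName, int partition) {
		return committedOffsets.get(topicName + '-' + partition);
	}

	@Override
	public void setCommittedOffset(String topicName, int partition, long offset) {
		committedOffsets.put(topicName + '-' + partition, offset);
	}

	@Override
	public void close() {
		committedOffsets.clear();
	}
}
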
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/TestFlinkFixedPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/TestFlinkFixedPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/TestFlinkFixedPartitioner.java
deleted file mode 100644
index fa84199..0000000
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/TestFlinkFixedPartitioner.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.flink.streaming.connectors.kafka;
-
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestFlinkFixedPartitioner {
-
-
-	/**
-	 * <pre>
-	 *   		Flink Sinks:		Kafka Partitions
-	 * 			1	---------------->	1
-	 * 			2   --------------/
-	 * 			3   -------------/
-	 * 			4	------------/
-	 * </pre>
-	 */
-	@Test
-	public void testMoreFlinkThanBrokers() {
-		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
-
-		int[] partitions = new int[]{0};
-
-		part.open(0, 4);
-		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
-
-		part.open(1, 4);
-		Assert.assertEquals(0, part.partition("abc2", null, null, null, partitions));
-
-		part.open(2, 4);
-		Assert.assertEquals(0, part.partition("abc3", null, null, null, partitions));
-		Assert.assertEquals(0, part.partition("abc3", null, null, null, partitions)); // check if it is changing ;)
-
-		part.open(3, 4);
-		Assert.assertEquals(0, part.partition("abc4", null, null, null, partitions));
-	}
-
-	/**
-	 *
-	 * <pre>
-	 * 		Flink Sinks:		Kafka Partitions
-	 * 			1	---------------->	1
-	 * 			2	---------------->	2
-	 * 									3
-	 * 									4
-	 * 									5
-	 *
-	 * </pre>
-	 */
-	@Test
-	public void testFewerPartitions() {
-		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
-
-		int[] partitions = new int[]{0, 1, 2, 3, 4};
-		part.open(0, 2);
-		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
-		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
-
-		part.open(1, 2);
-		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
-		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
-	}
-
-	/*
-	 * 		Flink Sinks:		Kafka Partitions
-	 * 			1	------------>--->	1
-	 * 			2	-----------/----> 	2
-	 * 			3	----------/
-	 */
-	@Test
-	public void testMixedCase() {
-		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
-		int[] partitions = new int[]{0,1};
-
-		part.open(0, 3);
-		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
-
-		part.open(1, 3);
-		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
-
-		part.open(2, 3);
-		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
-
-	}
-
-}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
index c1a64c4..c83a97e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
@@ -23,13 +23,14 @@ import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks
 import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext;
 import org.apache.flink.streaming.api.watermark.Watermark;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
-import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
 import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
+import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
 import org.apache.flink.util.SerializedValue;
 
 import org.junit.Test;
 
 import javax.annotation.Nullable;
+
 import java.util.HashMap;
 import java.util.Map;
 
@@ -38,6 +39,9 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 
+/**
+ * Tests for the {@link AbstractFetcher}.
+ */
 @SuppressWarnings("serial")
 public class AbstractFetcherTest {
 
@@ -191,7 +195,7 @@ public class AbstractFetcherTest {
 		final KafkaTopicPartitionState<Object> part3 = fetcher.subscribedPartitionStates()[2];
 
 		// elements generate a watermark if the timestamp is a multiple of three
-		
+
 		// elements for partition 1
 		fetcher.emitRecord(1L, part1, 1L);
 		fetcher.emitRecord(2L, part1, 2L);
@@ -211,11 +215,11 @@ public class AbstractFetcherTest {
 		fetcher.emitRecord(102L, part3, 2L);
 		assertEquals(102L, sourceContext.getLatestElement().getValue().longValue());
 		assertEquals(102L, sourceContext.getLatestElement().getTimestamp());
-		
+
 		// now, we should have a watermark
 		assertTrue(sourceContext.hasWatermark());
 		assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp());
-		
+
 		// advance partition 3
 		fetcher.emitRecord(1003L, part3, 3L);
 		fetcher.emitRecord(1004L, part3, 4L);
@@ -239,7 +243,7 @@ public class AbstractFetcherTest {
 		assertTrue(sourceContext.hasWatermark());
 		assertEquals(15L, sourceContext.getLatestWatermark().getTimestamp());
 	}
-	
+
 	@Test
 	public void testPeriodicWatermarks() throws Exception {
 		final String testTopic = "test topic name";
@@ -329,8 +333,7 @@ public class AbstractFetcherTest {
 				SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
 				SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
 				ProcessingTimeService processingTimeProvider,
-				long autoWatermarkInterval) throws Exception
-		{
+				long autoWatermarkInterval) throws Exception {
 			super(
 				sourceContext,
 				assignedPartitionsWithStartOffsets,
@@ -391,7 +394,6 @@ public class AbstractFetcherTest {
 			}
 		}
 
-
 		@Override
 		public void markAsTemporarilyIdle() {
 			throw new UnsupportedOperationException();
@@ -412,7 +414,7 @@ public class AbstractFetcherTest {
 		public boolean hasWatermark() {
 			return currentWatermark != null;
 		}
-		
+
 		public Watermark getLatestWatermark() throws InterruptedException {
 			synchronized (watermarkLock) {
 				while (currentWatermark == null) {
@@ -430,7 +432,7 @@ public class AbstractFetcherTest {
 	private static class PeriodicTestExtractor implements AssignerWithPeriodicWatermarks<Long> {
 
 		private volatile long maxTimestamp = Long.MIN_VALUE;
-		
+
 		@Override
 		public long extractTimestamp(Long element, long previousElementTimestamp) {
 			maxTimestamp = Math.max(maxTimestamp, element);
@@ -456,6 +458,6 @@ public class AbstractFetcherTest {
 		public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
 			return extractedTimestamp % 3 == 0 ? new Watermark(extractedTimestamp) : null;
 		}
-		
+
 	}
 }

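The watermark tests above use a punctuated extractor that emits a watermark whenever the extracted timestamp is a multiple of three. A standalone sketch of that extractor shape is shown below; the class name is illustrative, and the test itself keeps the assigner as a private inner class.

import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;

import javax.annotation.Nullable;

/**
 * Illustrative punctuated assigner: the element value doubles as the event timestamp,
 * and a watermark is emitted whenever that timestamp is divisible by three.
 */
public class MultipleOfThreeWatermarkAssigner implements AssignerWithPunctuatedWatermarks<Long> {

	@Override
	public long extractTimestamp(Long element, long previousElementTimestamp) {
		return element;
	}

	@Nullable
	@Override
	public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
		return extractedTimestamp % 3 == 0 ? new Watermark(extractedTimestamp) : null;
	}
}
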
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java
index b215bd3..4496a26 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java
@@ -27,8 +27,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * Tests for the {@link KafkaTopicPartition}.
+ */
 public class KafkaTopicPartitionTest {
-	
+
 	@Test
 	public void validateUid() {
 		Field uidField;
@@ -40,14 +43,14 @@ public class KafkaTopicPartitionTest {
 			fail("serialVersionUID is not defined");
 			return;
 		}
-		
+
 		assertTrue(Modifier.isStatic(uidField.getModifiers()));
 		assertTrue(Modifier.isFinal(uidField.getModifiers()));
 		assertTrue(Modifier.isPrivate(uidField.getModifiers()));
-		
+
 		assertEquals(long.class, uidField.getType());
-		
-		// the UID has to be constant to make sure old checkpoints/savepoints can be read 
+
+		// the UID has to be constant to make sure old checkpoints/savepoints can be read
 		try {
 			assertEquals(722083576322742325L, uidField.getLong(null));
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/AvroTestUtils.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/AvroTestUtils.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/AvroTestUtils.java
index 075b79b..a41125a 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/AvroTestUtils.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/AvroTestUtils.java
@@ -18,13 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka.testutils;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import org.apache.avro.Schema;
-import org.apache.avro.SchemaBuilder;
-import org.apache.avro.reflect.ReflectData;
-import org.apache.avro.specific.SpecificRecord;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.io.avro.generated.Address;
 import org.apache.flink.api.io.avro.generated.Colors;
@@ -32,12 +25,21 @@ import org.apache.flink.api.io.avro.generated.User;
 import org.apache.flink.api.java.tuple.Tuple3;
 import org.apache.flink.types.Row;
 
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaBuilder;
+import org.apache.avro.reflect.ReflectData;
+import org.apache.avro.specific.SpecificRecord;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+
 /**
  * Utilities for creating Avro Schemas.
  */
 public final class AvroTestUtils {
 
-	private static String NAMESPACE = "org.apache.flink.streaming.connectors.kafka";
+	private static final String NAMESPACE = "org.apache.flink.streaming.connectors.kafka";
 
 	/**
 	 * Creates a flat Avro Schema for testing.

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/DataGenerators.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/DataGenerators.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/DataGenerators.java
index c0fb836..b204ea9 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/DataGenerators.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/DataGenerators.java
@@ -18,10 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka.testutils;
 
-import java.util.Collection;
-import java.util.Properties;
-import java.util.Random;
-
 import org.apache.flink.api.common.JobExecutionResult;
 import org.apache.flink.api.common.functions.RichFunction;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
@@ -43,14 +39,22 @@ import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWra
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema;
 
+import java.util.Collection;
+import java.util.Properties;
+import java.util.Random;
+
+/**
+ * Test data generators.
+ */
 @SuppressWarnings("serial")
 public class DataGenerators {
 
-	public static void generateRandomizedIntegerSequence(StreamExecutionEnvironment env,
-														 KafkaTestEnvironment testServer, String topic,
-														 final int numPartitions,
-														 final int numElements,
-														 final boolean randomizeOrder) throws Exception {
+	public static void generateRandomizedIntegerSequence(
+			StreamExecutionEnvironment env,
+			KafkaTestEnvironment testServer, String topic,
+			final int numPartitions,
+			final int numElements,
+			final boolean randomizeOrder) throws Exception {
 		env.setParallelism(numPartitions);
 		env.getConfig().disableSysoutLogging();
 		env.setRestartStrategy(RestartStrategies.noRestart());
@@ -65,8 +69,8 @@ public class DataGenerators {
 						// create a sequence
 						int[] elements = new int[numElements];
 						for (int i = 0, val = getRuntimeContext().getIndexOfThisSubtask();
-							 i < numElements;
-							 i++, val += getRuntimeContext().getNumberOfParallelSubtasks()) {
+							i < numElements;
+							i++, val += getRuntimeContext().getNumberOfParallelSubtasks()) {
 
 							elements[i] = val;
 						}
@@ -99,7 +103,7 @@ public class DataGenerators {
 		Properties props = new Properties();
 		props.putAll(FlinkKafkaProducerBase.getPropertiesFromBrokerList(testServer.getBrokerConnectionString()));
 		Properties secureProps = testServer.getSecureProperties();
-		if(secureProps != null) {
+		if (secureProps != null) {
 			props.putAll(testServer.getSecureProperties());
 		}
 
@@ -119,6 +123,10 @@ public class DataGenerators {
 
 	// ------------------------------------------------------------------------
 
+	/**
+	 * A generator that continuously writes strings into the configured topic. The generation is stopped if an exception
+	 * occurs or {@link #shutdown()} is called.
+	 */
 	public static class InfiniteStringsGenerator extends Thread {
 
 		private final KafkaTestEnvironment server;
@@ -129,7 +137,6 @@ public class DataGenerators {
 
 		private volatile boolean running = true;
 
-
 		public InfiniteStringsGenerator(KafkaTestEnvironment server, String topic) {
 			this.server = server;
 			this.topic = topic;
@@ -164,7 +171,7 @@ public class DataGenerators {
 
 					int len = rnd.nextInt(100) + 1;
 					for (int i = 0; i < len; i++) {
-						bld.append((char) (rnd.nextInt(20) + 'a') );
+						bld.append((char) (rnd.nextInt(20) + 'a'));
 					}
 
 					String next = bld.toString();
@@ -211,7 +218,7 @@ public class DataGenerators {
 			}
 		}
 
-		public static class DummyStreamExecutionEnvironment extends StreamExecutionEnvironment {
+		private static class DummyStreamExecutionEnvironment extends StreamExecutionEnvironment {
 
 			@Override
 			public JobExecutionResult execute(String jobName) throws Exception {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FailingIdentityMapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FailingIdentityMapper.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FailingIdentityMapper.java
index ec64b00..c25eefb 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FailingIdentityMapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FailingIdentityMapper.java
@@ -22,30 +22,35 @@ import org.apache.flink.api.common.functions.RichMapFunction;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.runtime.state.CheckpointListener;
 import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
 import java.util.List;
 
-
-public class FailingIdentityMapper<T> extends RichMapFunction<T,T> implements
+/**
+ * A {@link RichMapFunction} that fails after the configured number of records have been processed.
+ *
+ * @param <T> The type of the records forwarded by the mapper.
+ */
+public class FailingIdentityMapper<T> extends RichMapFunction<T, T> implements
 	ListCheckpointed<Integer>, CheckpointListener, Runnable {
-	
+
 	private static final Logger LOG = LoggerFactory.getLogger(FailingIdentityMapper.class);
-	
+
 	private static final long serialVersionUID = 6334389850158707313L;
-	
+
 	public static volatile boolean failedBefore;
 	public static volatile boolean hasBeenCheckpointedBeforeFailure;
 
 	private final int failCount;
 	private int numElementsTotal;
 	private int numElementsThisTime;
-	
+
 	private boolean failer;
 	private boolean hasBeenCheckpointed;
-	
+
 	private Thread printer;
 	private volatile boolean printerRunning = true;
 
@@ -64,10 +69,10 @@ public class FailingIdentityMapper<T> extends RichMapFunction<T,T> implements
 	public T map(T value) throws Exception {
 		numElementsTotal++;
 		numElementsThisTime++;
-		
+
 		if (!failedBefore) {
 			Thread.sleep(10);
-			
+
 			if (failer && numElementsTotal >= failCount) {
 				hasBeenCheckpointedBeforeFailure = hasBeenCheckpointed;
 				failedBefore = true;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FakeStandardProducerConfig.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FakeStandardProducerConfig.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FakeStandardProducerConfig.java
index 055326d..0fbe554 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FakeStandardProducerConfig.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/FakeStandardProducerConfig.java
@@ -15,12 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka.testutils;
 
 import org.apache.kafka.common.serialization.ByteArraySerializer;
 
 import java.util.Properties;
 
+/**
+ * Test configuration for a Kafka producer.
+ */
 public class FakeStandardProducerConfig {
 
 	public static Properties get() {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/JobManagerCommunicationUtils.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/JobManagerCommunicationUtils.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/JobManagerCommunicationUtils.java
index 131325f..9bbe1d3 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/JobManagerCommunicationUtils.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/JobManagerCommunicationUtils.java
@@ -23,13 +23,16 @@ import org.apache.flink.runtime.client.JobStatusMessage;
 import org.apache.flink.runtime.instance.ActorGateway;
 import org.apache.flink.runtime.messages.JobManagerMessages;
 
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
+/**
+ * Utilities for communicating with a JobManager through an {@link ActorGateway}.
+ */
 public class JobManagerCommunicationUtils {
 
 	private static final FiniteDuration askTimeout = new FiniteDuration(30, TimeUnit.SECONDS);
@@ -43,7 +46,6 @@ public class JobManagerCommunicationUtils {
 			Object result = Await.result(listResponse, askTimeout);
 			List<JobStatusMessage> jobs = ((JobManagerMessages.RunningJobsStatus) result).getStatusMessages();
 
-
 			if (jobs.isEmpty()) {
 				return;
 			}
@@ -84,13 +86,13 @@ public class JobManagerCommunicationUtils {
 
 	public static void cancelCurrentJob(ActorGateway jobManager, String name) throws Exception {
 		JobStatusMessage status = null;
-		
+
 		for (int i = 0; i < 200; i++) {
 			// find the jobID
 			Future<Object> listResponse = jobManager.ask(
 					JobManagerMessages.getRequestRunningJobsStatus(),
 					askTimeout);
-	
+
 			List<JobStatusMessage> jobs;
 			try {
 				Object result = Await.result(listResponse, askTimeout);
@@ -99,7 +101,7 @@ public class JobManagerCommunicationUtils {
 			catch (Exception e) {
 				throw new Exception("Could not cancel job - failed to retrieve running jobs from the JobManager.", e);
 			}
-		
+
 			if (jobs.isEmpty()) {
 				// try again, fall through the loop
 				Thread.sleep(50);
@@ -107,33 +109,33 @@ public class JobManagerCommunicationUtils {
 			else if (jobs.size() == 1) {
 				status = jobs.get(0);
 			}
-			else if(name != null) {
-				for(JobStatusMessage msg: jobs) {
-					if(msg.getJobName().equals(name)) {
+			else if (name != null) {
+				for (JobStatusMessage msg: jobs) {
+					if (msg.getJobName().equals(name)) {
 						status = msg;
 					}
 				}
-				if(status == null) {
-					throw new Exception("Could not cancel job - no job matched expected name = '" + name +"' in " + jobs);
+				if (status == null) {
+					throw new Exception("Could not cancel job - no job matched expected name = '" + name + "' in " + jobs);
 				}
 			} else {
 				String jobNames = "";
-				for(JobStatusMessage jsm: jobs) {
+				for (JobStatusMessage jsm: jobs) {
 					jobNames += jsm.getJobName() + ", ";
 				}
 				throw new Exception("Could not cancel job - more than one running job: " + jobNames);
 			}
 		}
-		
+
 		if (status == null) {
-			throw new Exception("Could not cancel job - no running jobs");	
+			throw new Exception("Could not cancel job - no running jobs");
 		}
 		else if (status.getJobState().isGloballyTerminalState()) {
 			throw new Exception("Could not cancel job - job is not running any more");
 		}
-		
+
 		JobID jobId = status.getJobId();
-		
+
 		Future<Object> response = jobManager.ask(new JobManagerMessages.CancelJob(jobId), askTimeout);
 		try {
 			Await.result(response, askTimeout);

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/PartitionValidatingMapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/PartitionValidatingMapper.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/PartitionValidatingMapper.java
index e105e01..29e469d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/PartitionValidatingMapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/PartitionValidatingMapper.java
@@ -23,14 +23,16 @@ import org.apache.flink.api.common.functions.MapFunction;
 import java.util.HashSet;
 import java.util.Set;
 
-
+/**
+ * {@link MapFunction} that verifies that the partitioning is identical.
+ */
 public class PartitionValidatingMapper implements MapFunction<Integer, Integer> {
 
 	private static final long serialVersionUID = 1088381231244959088L;
-	
+
 	/* the partitions from which this function received data */
 	private final Set<Integer> myPartitions = new HashSet<>();
-	
+
 	private final int numPartitions;
 	private final int maxPartitions;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ThrottledMapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ThrottledMapper.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ThrottledMapper.java
index 1d61229..040f15c 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ThrottledMapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ThrottledMapper.java
@@ -23,10 +23,10 @@ import org.apache.flink.api.common.functions.MapFunction;
 /**
  * An identity map function that sleeps between elements, throttling the
  * processing speed.
- * 
+ *
  * @param <T> The type mapped.
  */
-public class ThrottledMapper<T> implements MapFunction<T,T> {
+public class ThrottledMapper<T> implements MapFunction<T, T> {
 
 	private static final long serialVersionUID = 467008933767159126L;
 
@@ -41,4 +41,4 @@ public class ThrottledMapper<T> implements MapFunction<T,T> {
 		Thread.sleep(this.sleep);
 		return value;
 	}
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/Tuple2FlinkPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/Tuple2FlinkPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/Tuple2FlinkPartitioner.java
index e7fff52..6f2c4a1 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/Tuple2FlinkPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/Tuple2FlinkPartitioner.java
@@ -33,13 +33,13 @@ public class Tuple2FlinkPartitioner extends FlinkKafkaPartitioner<Tuple2<Integer
 	public Tuple2FlinkPartitioner(int expectedPartitions) {
 		this.expectedPartitions = expectedPartitions;
 	}
-	
+
 	@Override
 	public int partition(Tuple2<Integer, Integer> next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
 		if (partitions.length != expectedPartitions) {
 			throw new IllegalArgumentException("Expected " + expectedPartitions + " partitions");
 		}
-		
+
 		return next.f0;
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ValidatingExactlyOnceSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ValidatingExactlyOnceSink.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ValidatingExactlyOnceSink.java
index 46e70fd..5ace012 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ValidatingExactlyOnceSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ValidatingExactlyOnceSink.java
@@ -22,6 +22,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
 import org.apache.flink.test.util.SuccessException;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,14 +30,17 @@ import java.util.BitSet;
 import java.util.Collections;
 import java.util.List;
 
+/**
+ * A {@link RichSinkFunction} that verifies that no duplicate records have been received.
+ */
 public class ValidatingExactlyOnceSink extends RichSinkFunction<Integer> implements ListCheckpointed<Tuple2<Integer, BitSet>> {
 
 	private static final Logger LOG = LoggerFactory.getLogger(ValidatingExactlyOnceSink.class);
 
 	private static final long serialVersionUID = 1748426382527469932L;
-	
+
 	private final int numElementsTotal;
-	
+
 	private BitSet duplicateChecker = new BitSet();  // this is checkpointed
 
 	private int numElements; // this is checkpointed
@@ -45,11 +49,10 @@ public class ValidatingExactlyOnceSink extends RichSinkFunction<Integer> impleme
 		this.numElementsTotal = numElementsTotal;
 	}
 
-	
 	@Override
 	public void invoke(Integer value) throws Exception {
 		numElements++;
-		
+
 		if (duplicateChecker.get(value)) {
 			throw new Exception("Received a duplicate: " + value);
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ZooKeeperStringSerializer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ZooKeeperStringSerializer.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ZooKeeperStringSerializer.java
index 37ed408..9aa1207 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ZooKeeperStringSerializer.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/ZooKeeperStringSerializer.java
@@ -18,9 +18,10 @@
 
 package org.apache.flink.streaming.connectors.kafka.testutils;
 
-import org.I0Itec.zkclient.serialize.ZkSerializer;
 import org.apache.flink.configuration.ConfigConstants;
 
+import org.I0Itec.zkclient.serialize.ZkSerializer;
+
 /**
  * Simple ZooKeeper serializer for Strings.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/resources/log4j-test.properties b/flink-connectors/flink-connector-kafka-base/src/test/resources/log4j-test.properties
index 16c226f..a3fb2b0 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-connector-kafka-base/src/test/resources/log4j-test.properties
@@ -26,4 +26,3 @@ log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
 # suppress the irrelevant (wrong) warnings from the netty channel handler
 log4j.logger.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, testlogger
 
-


[03/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-rabbitmq

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-rabbitmq


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d4f73391
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d4f73391
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d4f73391

Branch: refs/heads/master
Commit: d4f73391708bdfe466a5c3c771bb02f0fc3e1d03
Parents: 7ac4a24
Author: zentol <ch...@apache.org>
Authored: Wed May 24 22:49:55 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:06 2017 +0200

----------------------------------------------------------------------
 .../streaming/connectors/rabbitmq/RMQSink.java  | 17 ++++----
 .../connectors/rabbitmq/RMQSource.java          | 27 ++++++------
 .../rabbitmq/common/RMQConnectionConfig.java    | 46 ++++++++++++--------
 .../connectors/rabbitmq/RMQSourceTest.java      | 23 +++++-----
 .../common/RMQConnectionConfigTest.java         |  5 ++-
 .../connectors/rabbitmq/common/RMQSinkTest.java | 19 +++++---
 6 files changed, 78 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSink.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSink.java
index a0795d6..48675c5 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSink.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSink.java
@@ -17,21 +17,21 @@
 
 package org.apache.flink.streaming.connectors.rabbitmq;
 
-import java.io.IOException;
-
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
 import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.rabbitmq.client.Channel;
 import com.rabbitmq.client.Connection;
 import com.rabbitmq.client.ConnectionFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
 
 /**
- * A Sink for publishing data into RabbitMQ
+ * A Sink for publishing data into RabbitMQ.
  * @param <IN>
  */
 public class RMQSink<IN> extends RichSinkFunction<IN> {
@@ -78,7 +78,6 @@ public class RMQSink<IN> extends RichSinkFunction<IN> {
 		this.logFailuresOnly = logFailuresOnly;
 	}
 
-
 	@Override
 	public void open(Configuration config) throws Exception {
 		ConnectionFactory factory = rmqConnectionConfig.getConnectionFactory();
@@ -110,7 +109,7 @@ public class RMQSink<IN> extends RichSinkFunction<IN> {
 			if (logFailuresOnly) {
 				LOG.error("Cannot send RMQ message {} at {}", queueName, rmqConnectionConfig.getHost(), e);
 			} else {
-				throw new RuntimeException("Cannot send RMQ message " + queueName +" at " + rmqConnectionConfig.getHost(), e);
+				throw new RuntimeException("Cannot send RMQ message " + queueName + " at " + rmqConnectionConfig.getHost(), e);
 			}
 		}
 
@@ -128,12 +127,12 @@ public class RMQSink<IN> extends RichSinkFunction<IN> {
 		try {
 			connection.close();
 		} catch (IOException e) {
-			if(t != null) {
+			if (t != null) {
 				LOG.warn("Both channel and connection closing failed. Logging channel exception and failing with connection exception", t);
 			}
 			t = e;
 		}
-		if(t != null) {
+		if (t != null) {
 			throw new RuntimeException("Error while closing RMQ connection with " + queueName
 					+ " at " + rmqConnectionConfig.getHost(), t);
 		}

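For context, a hedged sketch of how the RMQSink touched above is usually wired into a job follows. The RMQConnectionConfig.Builder methods and the three-argument RMQSink constructor are assumptions about this connector's API and are not shown in this diff.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.rabbitmq.RMQSink;
import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

/**
 * Illustrative job wiring an RMQSink; connection settings are placeholders.
 */
public class RMQSinkSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Assumed builder API for the connection configuration.
		RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
			.setHost("localhost")
			.setPort(5672)
			.setUserName("guest")
			.setPassword("guest")
			.setVirtualHost("/")
			.build();

		DataStream<String> stream = env.fromElements("a", "b", "c");

		// Assumed constructor: (connection config, queue name, serialization schema).
		stream.addSink(new RMQSink<>(connectionConfig, "flink-demo-queue", new SimpleStringSchema()));

		env.execute("RMQ sink sketch");
	}
}
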
http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSource.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSource.java
index ee9c3b9..12a35f1 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSource.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSource.java
@@ -17,9 +17,6 @@
 
 package org.apache.flink.streaming.connectors.rabbitmq;
 
-import java.io.IOException;
-import java.util.List;
-
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
@@ -38,30 +35,33 @@ import com.rabbitmq.client.QueueingConsumer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.List;
+
 /**
  * RabbitMQ source (consumer) which reads from a queue and acknowledges messages on checkpoints.
  * When checkpointing is enabled, it guarantees exactly-once processing semantics.
  *
- * RabbitMQ requires messages to be acknowledged. On failures, RabbitMQ will re-resend all messages
+ * <p>RabbitMQ requires messages to be acknowledged. On failures, RabbitMQ will re-send all messages
  * which have not been acknowledged previously. When a failure occurs directly after a completed
  * checkpoint, all messages part of this checkpoint might be processed again because they couldn't
  * be acknowledged before failure. This case is handled by the {@link MessageAcknowledgingSourceBase}
  * base class which deduplicates the messages using the correlation id.
  *
- * RabbitMQ's Delivery Tags do NOT represent unique ids / offsets. That's why the source uses the
+ * <p>RabbitMQ's Delivery Tags do NOT represent unique ids / offsets. That's why the source uses the
  * Correlation ID in the message properties to check for duplicate messages. Note that the
  * correlation id has to be set at the producer. If the correlation id is not set, messages may
  * be produced more than once in corner cases.
  *
- * This source can be operated in three different modes:
+ * <p>This source can be operated in three different modes:
  *
- * 1) Exactly-once (when checkpointed) with RabbitMQ transactions and messages with
+ * <p>1) Exactly-once (when checkpointed) with RabbitMQ transactions and messages with
  *    unique correlation IDs.
  * 2) At-least-once (when checkpointed) with RabbitMQ transactions but no deduplication mechanism
  *    (correlation id is not set).
  * 3) No strong delivery guarantees (without checkpointing) with RabbitMQ auto-commit mode.
  *
- * Users may overwrite the setupConnectionFactory() method to pass their setup their own
+ * <p>Users may override the setupConnectionFactory() method to provide their own
  * ConnectionFactory in case the constructor parameters are not sufficient.
  *
  * @param <OUT> The type of the data read from RabbitMQ.
@@ -89,9 +89,9 @@ public class RMQSource<OUT> extends MultipleIdsMessageAcknowledgingSourceBase<OU
 	/**
 	 * Creates a new RabbitMQ source with at-least-once message processing guarantee when
 	 * checkpointing is enabled. No strong delivery guarantees when checkpointing is disabled.
-	 * For exactly-once, please use the constructor
-	 * {@link RMQSource#RMQSource(RMQConnectionConfig, String, boolean usesCorrelationId, DeserializationSchema)},
-	 * set {@param usesCorrelationId} to true and enable checkpointing.
+	 *
+	 * <p>For exactly-once, please use the constructor
+	 * {@link RMQSource#RMQSource(RMQConnectionConfig, String, boolean, DeserializationSchema)}.
 	 * @param rmqConnectionConfig The RabbitMQ connection configuration {@link RMQConnectionConfig}.
 	 * @param queueName  The queue to receive messages from.
 	 * @param deserializationSchema A {@link DeserializationSchema} for turning the bytes received
@@ -105,7 +105,7 @@ public class RMQSource<OUT> extends MultipleIdsMessageAcknowledgingSourceBase<OU
 	/**
 	 * Creates a new RabbitMQ source. For exactly-once, you must set the correlation ids of messages
 	 * at the producer. The correlation id must be unique. Otherwise the behavior of the source is
-	 * undefined. In doubt, set {@param usesCorrelationId} to false. When correlation ids are not
+	 * undefined. If in doubt, set usesCorrelationId to false. When correlation ids are not
 	 * used, this source has at-least-once processing semantics when checkpointing is enabled.
 	 * @param rmqConnectionConfig The RabbitMQ connection configuration {@link RMQConnectionConfig}.
 	 * @param queueName The queue to receive messages from.
@@ -116,7 +116,7 @@ public class RMQSource<OUT> extends MultipleIdsMessageAcknowledgingSourceBase<OU
 	 *                              into Java objects.
 	 */
 	public RMQSource(RMQConnectionConfig rmqConnectionConfig,
-					String queueName, boolean usesCorrelationId,DeserializationSchema<OUT> deserializationSchema) {
+					String queueName, boolean usesCorrelationId, DeserializationSchema<OUT> deserializationSchema) {
 		super(String.class);
 		this.rmqConnectionConfig = rmqConnectionConfig;
 		this.queueName = queueName;
@@ -185,7 +185,6 @@ public class RMQSource<OUT> extends MultipleIdsMessageAcknowledgingSourceBase<OU
 		}
 	}
 
-
 	@Override
 	public void run(SourceContext<OUT> ctx) throws Exception {
 		while (running) {
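
For reference, a minimal sketch of the exactly-once mode (mode 1) described in the javadoc above: checkpointing enabled, usesCorrelationId set to true, and a non-parallel source. The connection settings, queue name, and the use of SimpleStringSchema are illustrative assumptions, not taken from this commit.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.rabbitmq.RMQSource;
import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class RMQSourceSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		// Checkpointing is what upgrades the guarantee to at-least-once / exactly-once.
		env.enableCheckpointing(5000);

		RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
			.setHost("localhost").setPort(5672)
			.setUserName("guest").setPassword("guest").setVirtualHost("/")
			.build();

		// usesCorrelationId = true: the producer must set a unique correlation id per message,
		// otherwise deduplication cannot work and the guarantee degrades to at-least-once.
		env.addSource(new RMQSource<String>(
				connectionConfig, "flink-input-queue", true, new SimpleStringSchema()))
			.setParallelism(1) // keep the source non-parallel for the exactly-once mode
			.print();

		env.execute("RMQSource sketch");
	}
}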

http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
index 72bac1c..cce800a 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
@@ -17,8 +17,9 @@
 
 package org.apache.flink.streaming.connectors.rabbitmq.common;
 
-import com.rabbitmq.client.ConnectionFactory;
 import org.apache.flink.util.Preconditions;
+
+import com.rabbitmq.client.ConnectionFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -176,7 +177,7 @@ public class RMQConnectionConfig implements Serializable {
 	}
 
 	/**
-	 * Returns true if automatic connection recovery is enabled, false otherwise
+	 * Returns true if automatic connection recovery is enabled, false otherwise.
 	 * @return true if automatic connection recovery is enabled, false otherwise
 	 */
 	public Boolean isAutomaticRecovery() {
@@ -184,7 +185,7 @@ public class RMQConnectionConfig implements Serializable {
 	}
 
 	/**
-	 * Returns true if topology recovery is enabled, false otherwise
+	 * Returns true if topology recovery is enabled, false otherwise.
 	 * @return true if topology recovery is enabled, false otherwise
 	 */
 	public Boolean isTopologyRecovery() {
@@ -200,7 +201,7 @@ public class RMQConnectionConfig implements Serializable {
 	}
 
 	/**
-	 * Retrieve the requested maximum channel number
+	 * Retrieve the requested maximum channel number.
 	 * @return the initially requested maximum channel number; zero for unlimited
 	 */
 	public Integer getRequestedChannelMax() {
@@ -208,7 +209,7 @@ public class RMQConnectionConfig implements Serializable {
 	}
 
 	/**
-	 * Retrieve the requested maximum frame size
+	 * Retrieve the requested maximum frame size.
 	 * @return the initially requested maximum frame size, in octets; zero for unlimited
 	 */
 	public Integer getRequestedFrameMax() {
@@ -226,7 +227,9 @@ public class RMQConnectionConfig implements Serializable {
 	/**
 	 *
 	 * @return Connection Factory for RMQ
-	 * @throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException if Malformed URI has been passed
+	 * @throws URISyntaxException if a malformed URI has been passed
+	 * @throws NoSuchAlgorithmException if the SSL factory could not be created
+	 * @throws KeyManagementException if the SSL context could not be initialized
 	 */
 	public ConnectionFactory getConnectionFactory() throws URISyntaxException,
 		NoSuchAlgorithmException, KeyManagementException {
@@ -234,9 +237,17 @@ public class RMQConnectionConfig implements Serializable {
 		if (this.uri != null && !this.uri.isEmpty()){
 			try {
 				factory.setUri(this.uri);
-			} catch (URISyntaxException | NoSuchAlgorithmException | KeyManagementException e) {
+			} catch (URISyntaxException e) {
 				LOG.error("Failed to parse uri", e);
 				throw e;
+			} catch (KeyManagementException e) {
+				// this should never happen
+				LOG.error("Failed to initialize ssl context.", e);
+				throw e;
+			} catch (NoSuchAlgorithmException e) {
+				// this should never happen
+				LOG.error("Failed to setup ssl factory.", e);
+				throw e;
 			}
 		} else {
 			factory.setHost(this.host);
@@ -272,7 +283,7 @@ public class RMQConnectionConfig implements Serializable {
 	}
 
 	/**
-	 * The Builder Class for {@link RMQConnectionConfig}
+	 * The Builder Class for {@link RMQConnectionConfig}.
 	 */
 	public static class Builder {
 
@@ -355,7 +366,7 @@ public class RMQConnectionConfig implements Serializable {
 		}
 
 		/**
-		 * Enables or disables topology recovery
+		 * Enables or disables topology recovery.
 		 * @param topologyRecovery if true, enables topology recovery
 		 * @return the Builder
 		 */
@@ -375,7 +386,7 @@ public class RMQConnectionConfig implements Serializable {
 		}
 
 		/**
-		 * Set the requested maximum frame size
+		 * Set the requested maximum frame size.
 		 * @param requestedFrameMax initially requested maximum frame size, in octets; zero for unlimited
 		 * @return the Builder
 		 */
@@ -385,7 +396,7 @@ public class RMQConnectionConfig implements Serializable {
 		}
 
 		/**
-		 * Set the requested maximum channel number
+		 * Set the requested maximum channel number.
 		 * @param requestedChannelMax initially requested maximum channel number; zero for unlimited
 		 */
 		public Builder setRequestedChannelMax(int requestedChannelMax) {
@@ -414,7 +425,7 @@ public class RMQConnectionConfig implements Serializable {
 		}
 
 		/**
-		 * Enables or disables automatic connection recovery
+		 * Enables or disables automatic connection recovery.
 		 * @param automaticRecovery if true, enables connection recovery
 		 * @return the Builder
 		 */
@@ -424,17 +435,18 @@ public class RMQConnectionConfig implements Serializable {
 		}
 
 		/**
-		 * The Builder method
-		 * If URI is NULL we use host, port, vHost, username, password combination
+		 * The Builder method.
+		 *
+		 * <p>If URI is NULL we use host, port, vHost, username, password combination
 		 * to initialize connection. using  {@link RMQConnectionConfig#RMQConnectionConfig(String, Integer, String, String, String,
-		 * Integer, Boolean, Boolean, Integer, Integer, Integer, Integer)}
+		 * Integer, Boolean, Boolean, Integer, Integer, Integer, Integer)}.
 		 *
-		 * else URI will be used to initialize the client connection
+		 * <p>Otherwise the URI will be used to initialize the client connection
 		 * {@link RMQConnectionConfig#RMQConnectionConfig(String, Integer, Boolean, Boolean, Integer, Integer, Integer, Integer)}
 		 * @return RMQConnectionConfig
 		 */
 		public RMQConnectionConfig build(){
-			if(this.uri != null) {
+			if (this.uri != null) {
 				return new RMQConnectionConfig(this.uri, this.networkRecoveryInterval,
 					this.automaticRecovery, this.topologyRecovery, this.connectionTimeout, this.requestedChannelMax,
 					this.requestedFrameMax, this.requestedHeartbeat);
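
As a usage sketch of the build() and getConnectionFactory() contract documented above, the URI-based path might look as follows. The URI value is an illustrative placeholder, and the setUri builder method is assumed from the URI constructor path described in the javadoc rather than shown in this hunk.

import com.rabbitmq.client.ConnectionFactory;
import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;

import java.net.URISyntaxException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;

public class RMQConnectionConfigSketch {
	public static void main(String[] args) {
		// When a URI is set, build() takes the URI-based constructor path described above.
		RMQConnectionConfig uriConfig = new RMQConnectionConfig.Builder()
			.setUri("amqp://guest:guest@localhost:5672/%2f")
			.build();

		try {
			ConnectionFactory factory = uriConfig.getConnectionFactory();
			// factory can now be used to open connections and channels
		} catch (URISyntaxException | NoSuchAlgorithmException | KeyManagementException e) {
			// the three checked exceptions documented on getConnectionFactory()
			throw new RuntimeException("Invalid RabbitMQ connection settings", e);
		}
	}
}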

http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSourceTest.java b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSourceTest.java
index b65ddf0..05ae810 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSourceTest.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/RMQSourceTest.java
@@ -17,12 +17,6 @@
 
 package org.apache.flink.streaming.connectors.rabbitmq;
 
-import com.rabbitmq.client.AMQP;
-import com.rabbitmq.client.Channel;
-import com.rabbitmq.client.Connection;
-import com.rabbitmq.client.ConnectionFactory;
-import com.rabbitmq.client.Envelope;
-import com.rabbitmq.client.QueueingConsumer;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.common.state.OperatorStateStore;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -39,6 +33,13 @@ import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig
 import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
 import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+
+import com.rabbitmq.client.AMQP;
+import com.rabbitmq.client.Channel;
+import com.rabbitmq.client.Connection;
+import com.rabbitmq.client.ConnectionFactory;
+import com.rabbitmq.client.Envelope;
+import com.rabbitmq.client.QueueingConsumer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,7 +60,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 
-
 /**
 * Tests for the RMQSource. The source supports three operation modes.
  * 1) Exactly-once (when checkpointed) with RabbitMQ transactions and the deduplication mechanism in
@@ -67,7 +67,7 @@ import static org.mockito.Matchers.any;
  * 2) At-least-once (when checkpointed) with RabbitMQ transactions but not deduplication.
  * 3) No strong delivery guarantees (without checkpointing) with RabbitMQ auto-commit mode.
  *
- * This tests assumes that the message ids are increasing monotonously. That doesn't have to be the
+ * <p>This test assumes that the message ids are monotonically increasing. That doesn't have to be the
  * case. The correlation id is used to uniquely identify messages.
  */
 @RunWith(PowerMockRunner.class)
@@ -156,7 +156,7 @@ public class RMQSourceTest {
 
 		long totalNumberOfAcks = 0;
 
-		for (int i=0; i < numSnapshots; i++) {
+		for (int i = 0; i < numSnapshots; i++) {
 			long snapshotId = random.nextLong();
 			OperatorStateHandles data;
 
@@ -230,9 +230,8 @@ public class RMQSourceTest {
 		}
 	}
 
-
 	/**
-	 * The source should not acknowledge ids in auto-commit mode or check for previously acknowledged ids
+	 * The source should not acknowledge ids in auto-commit mode or check for previously acknowledged ids.
 	 */
 	@Test
 	public void testCheckpointingDisabled() throws Exception {
@@ -248,7 +247,7 @@ public class RMQSourceTest {
 	}
 
 	/**
-	 * Tests error reporting in case of invalid correlation ids
+	 * Tests error reporting in case of invalid correlation ids.
 	 */
 	@Test
 	public void testCorrelationIdNotSet() throws InterruptedException {

http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfigTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfigTest.java b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfigTest.java
index 40985ce..9cfac92 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfigTest.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfigTest.java
@@ -26,7 +26,9 @@ import java.security.NoSuchAlgorithmException;
 
 import static org.junit.Assert.assertEquals;
 
-
+/**
+ * Tests for the {@link RMQConnectionConfig}.
+ */
 public class RMQConnectionConfigTest {
 
 	@Test(expected = NullPointerException.class)
@@ -37,6 +39,7 @@ public class RMQConnectionConfigTest {
 			.setPassword("guest").setVirtualHost("/").build();
 		connectionConfig.getConnectionFactory();
 	}
+
 	@Test(expected = NullPointerException.class)
 	public void shouldThrowNullPointExceptionIfPortIsNull() throws NoSuchAlgorithmException,
 		KeyManagementException, URISyntaxException {

http://git-wip-us.apache.org/repos/asf/flink/blob/d4f73391/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQSinkTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQSinkTest.java b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQSinkTest.java
index 199cd1e..540a7ba 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQSinkTest.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/test/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQSinkTest.java
@@ -17,20 +17,28 @@
 
 package org.apache.flink.streaming.connectors.rabbitmq.common;
 
-import com.rabbitmq.client.Channel;
-import com.rabbitmq.client.Connection;
-import com.rabbitmq.client.ConnectionFactory;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.connectors.rabbitmq.RMQSink;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+
+import com.rabbitmq.client.Channel;
+import com.rabbitmq.client.Connection;
+import com.rabbitmq.client.ConnectionFactory;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
 
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
-
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for the {@link RMQSink}.
+ */
 public class RMQSinkTest {
 
 	private static final String QUEUE_NAME = "queue";
@@ -43,7 +51,6 @@ public class RMQSinkTest {
 	private Channel channel;
 	private SerializationSchema<String> serializationSchema;
 
-
 	@Before
 	public void before() throws Exception {
 		serializationSchema = spy(new DummySerializationSchema());


[09/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kafka*

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
index c736493..06cdf2c 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
@@ -19,24 +19,25 @@
 package org.apache.flink.streaming.connectors.kafka.internals;
 
 import javax.annotation.Nullable;
+
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
  * A proxy that communicates exceptions between threads. Typically used if an exception
  * from a spawned thread needs to be recognized by the "parent" (spawner) thread.
- * 
+ *
  * <p>The spawned thread would set the exception via {@link #reportError(Throwable)}.
  * The parent would check (at certain points) for exceptions via {@link #checkAndThrowException()}.
  * Optionally, the parent can pass itself in the constructor to be interrupted as soon as
  * an exception occurs.
- * 
+ *
  * <pre>
  * {@code
- * 
+ *
  * final ExceptionProxy errorProxy = new ExceptionProxy(Thread.currentThread());
- * 
+ *
  * Thread subThread = new Thread() {
- * 
+ *
  *     public void run() {
  *         try {
  *             doSomething();
@@ -48,13 +49,13 @@ import java.util.concurrent.atomic.AtomicReference;
  *     }
  * };
  * subThread.start();
- * 
+ *
  * doSomethingElse();
  * errorProxy.checkAndThrowException();
- * 
+ *
  * doSomethingMore();
  * errorProxy.checkAndThrowException();
- * 
+ *
  * try {
  *     subThread.join();
  * } catch (InterruptedException e) {
@@ -66,33 +67,33 @@ import java.util.concurrent.atomic.AtomicReference;
  * </pre>
  */
 public class ExceptionProxy {
-	
-	/** The thread that should be interrupted when an exception occurs */
+
+	/** The thread that should be interrupted when an exception occurs. */
 	private final Thread toInterrupt;
-	
-	/** The exception to throw */ 
+
+	/** The exception to throw. */
 	private final AtomicReference<Throwable> exception;
 
 	/**
 	 * Creates an exception proxy that interrupts the given thread upon
 	 * report of an exception. The thread to interrupt may be null.
-	 * 
+	 *
 	 * @param toInterrupt The thread to interrupt upon an exception. May be null.
 	 */
 	public ExceptionProxy(@Nullable Thread toInterrupt) {
 		this.toInterrupt = toInterrupt;
 		this.exception = new AtomicReference<>();
 	}
-	
+
 	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * Sets the exception and interrupts the target thread,
 	 * if no other exception has occurred so far.
-	 * 
+	 *
 	 * <p>The exception is only set (and the interruption is only triggered),
 	 * if no other exception was set before.
-	 * 
+	 *
 	 * @param t The exception that occurred
 	 */
 	public void reportError(Throwable t) {
@@ -105,7 +106,7 @@ public class ExceptionProxy {
 	/**
 	 * Checks whether an exception has been set via {@link #reportError(Throwable)}.
 	 * If yes, that exception is re-thrown by this method.
-	 * 
+	 *
 	 * @throws Exception This method re-throws the exception, if set.
 	 */
 	public void checkAndThrowException() throws Exception {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
index c68fe28..f3645e3 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
@@ -27,16 +27,16 @@ import static java.util.Objects.requireNonNull;
 /**
  * Flink's description of a partition in a Kafka topic.
  * Serializable, and common across all Kafka consumer subclasses (0.8, 0.9, ...)
- * 
+ *
  * <p>Note: This class must not change in its structure, because it would change the
  * serialization format and make previous savepoints unreadable.
  */
 public final class KafkaTopicPartition implements Serializable {
 
 	/** THIS SERIAL VERSION UID MUST NOT CHANGE, BECAUSE IT WOULD BREAK
-	 * READING OLD SERIALIZED INSTANCES FROM SAVEPOINTS */
+	 * READING OLD SERIALIZED INSTANCES FROM SAVEPOINTS. */
 	private static final long serialVersionUID = 722083576322742325L;
-	
+
 	// ------------------------------------------------------------------------
 
 	private final String topic;
@@ -50,7 +50,7 @@ public final class KafkaTopicPartition implements Serializable {
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	public String getTopic() {
 		return topic;
 	}
@@ -60,7 +60,7 @@ public final class KafkaTopicPartition implements Serializable {
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public String toString() {
 		return "KafkaTopicPartition{" +
@@ -87,7 +87,7 @@ public final class KafkaTopicPartition implements Serializable {
 	public int hashCode() {
 		return cachedHash;
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Utilities
 	// ------------------------------------------------------------------------
@@ -109,10 +109,9 @@ public final class KafkaTopicPartition implements Serializable {
 		return sb.toString();
 	}
 
-
 	public static List<KafkaTopicPartition> dropLeaderData(List<KafkaTopicPartitionLeader> partitionInfos) {
 		List<KafkaTopicPartition> ret = new ArrayList<>(partitionInfos.size());
-		for(KafkaTopicPartitionLeader ktpl: partitionInfos) {
+		for (KafkaTopicPartitionLeader ktpl: partitionInfos) {
 			ret.add(ktpl.getTopicPartition());
 		}
 		return ret;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
index adfbf79..78ab612 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
@@ -20,31 +20,31 @@ package org.apache.flink.streaming.connectors.kafka.internals;
 /**
  * The state that the Flink Kafka Consumer holds for each Kafka partition.
  * Includes the Kafka descriptor for partitions.
- * 
+ *
  * <p>This class describes the most basic state (only the offset), subclasses
  * define more elaborate state, containing current watermarks and timestamp
  * extractors.
- * 
+ *
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions.
  */
 public class KafkaTopicPartitionState<KPH> {
-	
+
 	// ------------------------------------------------------------------------
 
-	/** The Flink description of a Kafka partition */
+	/** The Flink description of a Kafka partition. */
 	private final KafkaTopicPartition partition;
 
-	/** The Kafka description of a Kafka partition (varies across different Kafka versions) */
+	/** The Kafka description of a Kafka partition (varies across different Kafka versions). */
 	private final KPH kafkaPartitionHandle;
-	
-	/** The offset within the Kafka partition that we already processed */
+
+	/** The offset within the Kafka partition that we already processed. */
 	private volatile long offset;
 
-	/** The offset of the Kafka partition that has been committed */
+	/** The offset of the Kafka partition that has been committed. */
 	private volatile long committedOffset;
 
 	// ------------------------------------------------------------------------
-	
+
 	public KafkaTopicPartitionState(KafkaTopicPartition partition, KPH kafkaPartitionHandle) {
 		this.partition = partition;
 		this.kafkaPartitionHandle = kafkaPartitionHandle;
@@ -103,7 +103,6 @@ public class KafkaTopicPartitionState<KPH> {
 		return committedOffset;
 	}
 
-	
 	// ------------------------------------------------------------------------
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
index 153a326..c218618 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
@@ -20,7 +20,7 @@ package org.apache.flink.streaming.connectors.kafka.internals;
 /**
  * Magic values used to represent special offset states before partitions are actually read.
  *
- * The values are all negative. Negative offsets are not used by Kafka (invalid), so we
+ * <p>The values are all negative. Negative offsets are not used by Kafka (invalid), so we
  * pick a number that is probably (hopefully) not used by Kafka as a magic number for anything else.
  */
 public class KafkaTopicPartitionStateSentinel {
@@ -31,7 +31,7 @@ public class KafkaTopicPartitionStateSentinel {
 	/**
 	 * Magic number that defines the partition should start from the earliest offset.
 	 *
-	 * This is used as a placeholder so that the actual earliest offset can be evaluated lazily
+	 * <p>This is used as a placeholder so that the actual earliest offset can be evaluated lazily
 	 * when the partition will actually start to be read by the consumer.
 	 */
 	public static final long EARLIEST_OFFSET = -915623761775L;
@@ -39,7 +39,7 @@ public class KafkaTopicPartitionStateSentinel {
 	/**
 	 * Magic number that defines the partition should start from the latest offset.
 	 *
-	 * This is used as a placeholder so that the actual latest offset can be evaluated lazily
+	 * <p>This is used as a placeholder so that the actual latest offset can be evaluated lazily
 	 * when the partition will actually start to be read by the consumer.
 	 */
 	public static final long LATEST_OFFSET = -915623761774L;
@@ -47,7 +47,7 @@ public class KafkaTopicPartitionStateSentinel {
 	/**
 	 * Magic number that defines the partition should start from its committed group offset in Kafka.
 	 *
-	 * This is used as a placeholder so that the actual committed group offset can be evaluated lazily
+	 * <p>This is used as a placeholder so that the actual committed group offset can be evaluated lazily
 	 * when the partition will actually start to be read by the consumer.
 	 */
 	public static final long GROUP_OFFSET = -915623761773L;
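
The sentinels above are placeholders that only make sense until real offsets are known. A hypothetical helper (not part of this commit) that resolves them lazily might look like this:

import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionStateSentinel;

public class SentinelSketch {

	/** Resolves a configured start position that may still be one of the magic sentinel values. */
	static long resolveStartOffset(long configured, long earliest, long latest, long committedGroupOffset) {
		if (configured == KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET) {
			return earliest;
		} else if (configured == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
			return latest;
		} else if (configured == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) {
			return committedGroupOffset;
		} else {
			return configured; // already a real, non-negative Kafka offset
		}
	}
}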

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
index efdc73f..5116e9f 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
@@ -23,36 +23,35 @@ import org.apache.flink.streaming.api.watermark.Watermark;
 /**
  * A special version of the per-kafka-partition-state that additionally holds
  * a periodic watermark generator (and timestamp extractor) per partition.
- * 
+ *
  * @param <T> The type of records handled by the watermark generator
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions.
  */
 public final class KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> extends KafkaTopicPartitionState<KPH> {
-	
-	/** The timestamp assigner and watermark generator for the partition */
+
+	/** The timestamp assigner and watermark generator for the partition. */
 	private final AssignerWithPeriodicWatermarks<T> timestampsAndWatermarks;
-	
-	/** The last watermark timestamp generated by this partition */
+
+	/** The last watermark timestamp generated by this partition. */
 	private long partitionWatermark;
 
 	// ------------------------------------------------------------------------
-	
+
 	public KafkaTopicPartitionStateWithPeriodicWatermarks(
 			KafkaTopicPartition partition, KPH kafkaPartitionHandle,
-			AssignerWithPeriodicWatermarks<T> timestampsAndWatermarks)
-	{
+			AssignerWithPeriodicWatermarks<T> timestampsAndWatermarks) {
 		super(partition, kafkaPartitionHandle);
-		
+
 		this.timestampsAndWatermarks = timestampsAndWatermarks;
 		this.partitionWatermark = Long.MIN_VALUE;
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	public long getTimestampForRecord(T record, long kafkaEventTimestamp) {
 		return timestampsAndWatermarks.extractTimestamp(record, kafkaEventTimestamp);
 	}
-	
+
 	public long getCurrentWatermarkTimestamp() {
 		Watermark wm = timestampsAndWatermarks.getCurrentWatermark();
 		if (wm != null) {
@@ -62,7 +61,7 @@ public final class KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> extend
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public String toString() {
 		return "KafkaTopicPartitionStateWithPeriodicWatermarks: partition=" + getKafkaTopicPartition()

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
index edf40ce..f4a80a4 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
@@ -25,35 +25,34 @@ import javax.annotation.Nullable;
 /**
  * A special version of the per-kafka-partition-state that additionally holds
 * a punctuated watermark generator (and timestamp extractor) per partition.
- * 
+ *
  * <p>This class is not thread safe, but it gives volatile access to the current
  * partition watermark ({@link #getCurrentPartitionWatermark()}).
- * 
+ *
  * @param <T> The type of records handled by the watermark generator
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions
  */
 public final class KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> extends KafkaTopicPartitionState<KPH> {
-	
-	/** The timestamp assigner and watermark generator for the partition */
+
+	/** The timestamp assigner and watermark generator for the partition. */
 	private final AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks;
-	
-	/** The last watermark timestamp generated by this partition */
+
+	/** The last watermark timestamp generated by this partition. */
 	private volatile long partitionWatermark;
 
 	// ------------------------------------------------------------------------
-	
+
 	public KafkaTopicPartitionStateWithPunctuatedWatermarks(
 			KafkaTopicPartition partition, KPH kafkaPartitionHandle,
-			AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks)
-	{
+			AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks) {
 		super(partition, kafkaPartitionHandle);
-		
+
 		this.timestampsAndWatermarks = timestampsAndWatermarks;
 		this.partitionWatermark = Long.MIN_VALUE;
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	public long getTimestampForRecord(T record, long kafkaEventTimestamp) {
 		return timestampsAndWatermarks.extractTimestamp(record, kafkaEventTimestamp);
 	}
@@ -69,13 +68,13 @@ public final class KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> exte
 			return null;
 		}
 	}
-	
+
 	public long getCurrentPartitionWatermark() {
 		return partitionWatermark;
 	}
 
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public String toString() {
 		return "KafkaTopicPartitionStateWithPunctuatedWatermarks: partition=" + getKafkaTopicPartition()

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
index e47c667..6ed3717 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
 import org.apache.flink.util.Preconditions;
@@ -22,9 +23,9 @@ import org.apache.flink.util.Preconditions;
 /**
  * A partitioner ensuring that each internal Flink partition ends up in one Kafka partition.
  *
- * Note, one Kafka partition can contain multiple Flink partitions.
+ * <p>Note, one Kafka partition can contain multiple Flink partitions.
  *
- * Cases:
+ * <p>Cases:
  * 	# More Flink partitions than kafka partitions
  * <pre>
  * 		Flink Sinks:		Kafka Partitions
@@ -35,7 +36,7 @@ import org.apache.flink.util.Preconditions;
  * </pre>
  * Some (or all) kafka partitions contain the output of more than one flink partition
  *
- *# Fewer Flink partitions than Kafka
+ * <p>Fewer Flink partitions than Kafka
  * <pre>
  * 		Flink Sinks:		Kafka Partitions
  * 			1	----------------&gt;	1
@@ -45,9 +46,9 @@ import org.apache.flink.util.Preconditions;
  * 										5
  * </pre>
  *
- *  Not all Kafka partitions contain data
- *  To avoid such an unbalanced partitioning, use a round-robin kafka partitioner (note that this will
- *  cause a lot of network connections between all the Flink instances and all the Kafka brokers).
+ * <p>Not all Kafka partitions contain data.
+ * To avoid such an unbalanced partitioning, use a round-robin kafka partitioner (note that this will
+ * cause a lot of network connections between all the Flink instances and all the Kafka brokers).
  */
 public class FlinkFixedPartitioner<T> extends FlinkKafkaPartitioner<T> {
 
@@ -60,13 +61,13 @@ public class FlinkFixedPartitioner<T> extends FlinkKafkaPartitioner<T> {
 
 		this.parallelInstanceId = parallelInstanceId;
 	}
-	
+
 	@Override
 	public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
 		Preconditions.checkArgument(
 			partitions != null && partitions.length > 0,
 			"Partitions of the target topic is empty.");
-		
+
 		return partitions[parallelInstanceId % partitions.length];
 	}
 }
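
The mapping documented above boils down to partitions[parallelInstanceId % partitions.length]. A small, purely illustrative walk-through of the first case (more Flink sink subtasks than Kafka partitions):

public class FixedPartitionerSketch {
	public static void main(String[] args) {
		int[] kafkaPartitions = {0, 1, 2}; // 3 Kafka partitions
		int flinkParallelism = 5;          // 5 sink subtasks

		for (int subtask = 0; subtask < flinkParallelism; subtask++) {
			int target = kafkaPartitions[subtask % kafkaPartitions.length];
			// subtasks 0..4 land on partitions 0, 1, 2, 0, 1 -- partitions 0 and 1
			// receive the output of two subtasks each, as described in the javadoc above
			System.out.println("Flink subtask " + subtask + " -> Kafka partition " + target);
		}
	}
}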

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
index b7b4143..168e76b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
 /**

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowDeserializationSchema.java
index 37241f5..0713738 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowDeserializationSchema.java
@@ -14,11 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.List;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.io.DatumReader;
@@ -28,13 +29,15 @@ import org.apache.avro.specific.SpecificData;
 import org.apache.avro.specific.SpecificDatumReader;
 import org.apache.avro.specific.SpecificRecord;
 import org.apache.avro.util.Utf8;
-import org.apache.flink.types.Row;
-import org.apache.flink.util.Preconditions;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.util.List;
 
 /**
  * Deserialization schema from Avro bytes over {@link SpecificRecord} to {@link Row}.
  *
- * Deserializes the <code>byte[]</code> messages into (nested) Flink Rows.
+ * <p>Deserializes the <code>byte[]</code> messages into (nested) Flink Rows.
  *
  * {@link Utf8} is converted to regular Java Strings.
  */
@@ -56,7 +59,7 @@ public class AvroRowDeserializationSchema extends AbstractDeserializationSchema<
 	private final MutableByteArrayInputStream inputStream;
 
 	/**
-	 * Avro decoder that decodes binary data
+	 * Avro decoder that decodes binary data.
 	 */
 	private final Decoder decoder;
 
@@ -133,9 +136,7 @@ public class AvroRowDeserializationSchema extends AbstractDeserializationSchema<
 	 * InputStream instance, copying message to process, and creation of Decoder on every new message.
 	 */
 	private static final class MutableByteArrayInputStream extends ByteArrayInputStream {
-		/**
-		 * Create MutableByteArrayInputStream
-		 */
+
 		public MutableByteArrayInputStream() {
 			super(new byte[0]);
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java
index 8388ab5..450c78f 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java
@@ -15,11 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.List;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericData;
 import org.apache.avro.generic.GenericRecord;
@@ -30,8 +31,10 @@ import org.apache.avro.specific.SpecificData;
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.specific.SpecificRecord;
 import org.apache.avro.util.Utf8;
-import org.apache.flink.types.Row;
-import org.apache.flink.util.Preconditions;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.List;
 
 /**
 * Serialization schema that serializes {@link Row} over {@link SpecificRecord} into Avro bytes.

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
index d170058..095e964 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -21,11 +22,10 @@ import com.fasterxml.jackson.databind.node.ObjectNode;
 
 import java.io.IOException;
 
-
 /**
  * DeserializationSchema that deserializes a JSON String into an ObjectNode.
- * <p>
- * Fields can be accessed by calling objectNode.get(&lt;name>).as(&lt;type>)
+ *
+ * <p>Fields can be accessed by calling objectNode.get(&lt;name>).as(&lt;type>)
  */
 public class JSONDeserializationSchema extends AbstractDeserializationSchema<ObjectNode> {
 	private ObjectMapper mapper;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
index 261a111..f75df0c 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
@@ -14,12 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
 
 import java.io.IOException;
 
@@ -27,12 +29,12 @@ import static org.apache.flink.api.java.typeutils.TypeExtractor.getForClass;
 
 /**
  * DeserializationSchema that deserializes a JSON String into an ObjectNode.
- * <p>
- * Key fields can be accessed by calling objectNode.get("key").get(&lt;name>).as(&lt;type>)
- * <p>
- * Value fields can be accessed by calling objectNode.get("value").get(&lt;name>).as(&lt;type>)
- * <p>
- * Metadata fields can be accessed by calling objectNode.get("metadata").get(&lt;name>).as(&lt;type>) and include
+ *
+ * <p>Key fields can be accessed by calling objectNode.get("key").get(&lt;name>).as(&lt;type>)
+ *
+ * <p>Value fields can be accessed by calling objectNode.get("value").get(&lt;name>).as(&lt;type>)
+ *
+ * <p>Metadata fields can be accessed by calling objectNode.get("metadata").get(&lt;name>).as(&lt;type>) and include
  * the "offset" (long), "topic" (String) and "partition" (int).
  */
 public class JSONKeyValueDeserializationSchema implements KeyedDeserializationSchema<ObjectNode> {
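
A minimal consumption sketch for the access pattern documented above. The boolean "include metadata" constructor flag, the Kafka 0.9 consumer class, the topic name, and the "user" field are assumptions for illustration, not taken from this diff.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09;
import org.apache.flink.streaming.util.serialization.JSONKeyValueDeserializationSchema;

import com.fasterxml.jackson.databind.node.ObjectNode;

import java.util.Properties;

public class JsonKeyValueSketch {
	public static void main(String[] args) throws Exception {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092");
		props.setProperty("group.id", "json-kv-sketch");

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		DataStream<ObjectNode> records = env.addSource(new FlinkKafkaConsumer09<>(
				"my-topic", new JSONKeyValueDeserializationSchema(true), props));

		records.map(new MapFunction<ObjectNode, String>() {
			@Override
			public String map(ObjectNode node) {
				// "user" is a hypothetical field of the JSON value payload
				return node.get("value").get("user").asText()
						+ " @offset=" + node.get("metadata").get("offset").asLong();
			}
		}).print();

		env.execute("JSONKeyValueDeserializationSchema sketch");
	}
}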

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
index be201fa..f335c30 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
@@ -18,14 +18,16 @@
 
 package org.apache.flink.streaming.util.serialization;
 
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import java.io.IOException;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.io.IOException;
+
 /**
  * Deserialization schema from JSON to {@link Row}.
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
index 1998aa6..a3fa379 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
@@ -14,14 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ObjectNode;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
 
 /**
 * Serialization schema that serializes an object into JSON bytes.
@@ -33,9 +34,9 @@ import org.apache.flink.util.Preconditions;
  * {@link JsonRowDeserializationSchema}.
  */
 public class JsonRowSerializationSchema implements SerializationSchema<Row> {
-	/** Fields names in the input Row object */
+	/** Field names in the input Row object. */
 	private final String[] fieldNames;
-	/** Object mapper that is used to create output JSON objects */
+	/** Object mapper that is used to create output JSON objects. */
 	private static ObjectMapper mapper = new ObjectMapper();
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
index b5a33bc..234a96d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
@@ -26,7 +26,7 @@ import java.io.Serializable;
  * The deserialization schema describes how to turn the byte key / value messages delivered by certain
  * data sources (for example Apache Kafka) into data types (Java/Scala objects) that are
  * processed by Flink.
- * 
+ *
  * @param <T> The type created by the keyed deserialization schema.
  */
 public interface KeyedDeserializationSchema<T> extends Serializable, ResultTypeQueryable<T> {
@@ -46,7 +46,7 @@ public interface KeyedDeserializationSchema<T> extends Serializable, ResultTypeQ
 	/**
 	 * Method to decide whether the element signals the end of the stream. If
 	 * true is returned the element won't be emitted.
-	 * 
+	 *
 	 * @param nextElement The element to test for the end-of-stream signal.
 	 *
 	 * @return True, if the element signals end of stream, false otherwise.

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
index 4b9dba2..e128aba 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -22,7 +23,7 @@ import java.io.IOException;
 
 /**
  * A simple wrapper for using the DeserializationSchema with the KeyedDeserializationSchema
- * interface
+ * interface.
  * @param <T> The type created by the deserialization schema.
  */
 public class KeyedDeserializationSchemaWrapper<T> implements KeyedDeserializationSchema<T> {
@@ -34,6 +35,7 @@ public class KeyedDeserializationSchemaWrapper<T> implements KeyedDeserializatio
 	public KeyedDeserializationSchemaWrapper(DeserializationSchema<T> deserializationSchema) {
 		this.deserializationSchema = deserializationSchema;
 	}
+
 	@Override
 	public T deserialize(byte[] messageKey, byte[] message, String topic, int partition, long offset) throws IOException {
 		return deserializationSchema.deserialize(message);

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
index 701281e..12bcab9 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
 import java.io.Serializable;
@@ -22,7 +23,7 @@ import java.io.Serializable;
  * The serialization schema describes how to turn a data object into a different serialized
  * representation. Most data sinks (for example Apache Kafka) require the data to be handed
  * to them in a specific format (for example as byte strings).
- * 
+ *
  * @param <T> The type to be serialized.
  */
 public interface KeyedSerializationSchema<T> extends Serializable {
@@ -36,17 +37,16 @@ public interface KeyedSerializationSchema<T> extends Serializable {
 	 */
 	byte[] serializeKey(T element);
 
-
 	/**
-	 * Serializes the value of the incoming element to a byte array
-	 * 
+	 * Serializes the value of the incoming element to a byte array.
+	 *
 	 * @param element The incoming element to be serialized
 	 * @return the value of the element as a byte array
 	 */
 	byte[] serializeValue(T element);
 
 	/**
-	 * Optional method to determine the target topic for the element
+	 * Optional method to determine the target topic for the element.
 	 *
 	 * @param element Incoming element to determine the target topic from
 	 * @return null or the target topic
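
As a counterpart on the producer side, a minimal sketch of a KeyedSerializationSchema implementation, assuming only the three methods shown in this hunk; the class name and the choice to emit no key are illustrative.

import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;

import java.nio.charset.StandardCharsets;

/**
 * Illustrative schema that writes the element as the value, leaves the key empty,
 * and lets the producer fall back to its default topic.
 */
public class StringValueSerializationSchema implements KeyedSerializationSchema<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public byte[] serializeKey(String element) {
		return null; // no key for this sketch
	}

	@Override
	public byte[] serializeValue(String element) {
		return element.getBytes(StandardCharsets.UTF_8);
	}

	@Override
	public String getTargetTopic(String element) {
		return null; // use the producer's default topic
	}
}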

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
index 1b3e486..0a181d1 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
@@ -14,11 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.util.serialization;
 
 /**
  * A simple wrapper for using the SerializationSchema with the KeyedDeserializationSchema
- * interface
+ * interface.
  * @param <T> The type to serialize
  */
 public class KeyedSerializationSchemaWrapper<T> implements KeyedSerializationSchema<T> {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
index 51bc8d1..3e0cdb5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
@@ -32,30 +32,29 @@ import java.io.IOException;
 /**
  * A serialization and deserialization schema for Key Value Pairs that uses Flink's serialization stack to
  * transform typed from and to byte arrays.
- * 
+ *
  * @param <K> The key type to be serialized.
  * @param <V> The value type to be serialized.
  */
-public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDeserializationSchema<Tuple2<K, V>>, KeyedSerializationSchema<Tuple2<K,V>> {
+public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDeserializationSchema<Tuple2<K, V>>, KeyedSerializationSchema<Tuple2<K, V>> {
 
 	private static final long serialVersionUID = -5359448468131559102L;
 
-	/** The serializer for the key */
+	/** The serializer for the key. */
 	private final TypeSerializer<K> keySerializer;
 
-	/** The serializer for the value */
+	/** The serializer for the value. */
 	private final TypeSerializer<V> valueSerializer;
 
-	/** reusable input deserialization buffer */
+	/** reusable input deserialization buffer. */
 	private final DataInputDeserializer inputDeserializer;
-	
-	/** reusable output serialization buffer for the key */
+
+	/** reusable output serialization buffer for the key. */
 	private transient DataOutputSerializer keyOutputSerializer;
 
-	/** reusable output serialization buffer for the value */
+	/** reusable output serialization buffer for the value. */
 	private transient DataOutputSerializer valueOutputSerializer;
-	
-	
+
 	/** The type information, to be returned by {@link #getProducedType()}. It is
 	 * transient, because it is not serializable. Note that this means that the type information
 	 * is not available at runtime, but only prior to the first serialization / deserialization */
@@ -80,10 +79,10 @@ public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDe
 	/**
 	 * Creates a new de-/serialization schema for the given types. This constructor accepts the types
 	 * as classes and internally constructs the type information from the classes.
-	 * 
+	 *
 	 * <p>If the types are parametrized and cannot be fully defined via classes, use the constructor
 	 * that accepts {@link TypeInformation} instead.
-	 * 
+	 *
 	 * @param keyClass The class of the key de-/serialized by this schema.
 	 * @param valueClass The class of the value de-/serialized by this schema.
 	 * @param config The execution config, which is used to parametrize the type serializers.
@@ -94,12 +93,11 @@ public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDe
 
 	// ------------------------------------------------------------------------
 
-
 	@Override
 	public Tuple2<K, V> deserialize(byte[] messageKey, byte[] message, String topic, int partition, long offset) throws IOException {
 		K key = null;
 		V value = null;
-		
+
 		if (messageKey != null) {
 			inputDeserializer.setBuffer(messageKey, 0, messageKey.length);
 			key = keySerializer.deserialize(inputDeserializer);
@@ -117,11 +115,10 @@ public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDe
 	 * @return Returns false.
 	 */
 	@Override
-	public boolean isEndOfStream(Tuple2<K,V> nextElement) {
+	public boolean isEndOfStream(Tuple2<K, V> nextElement) {
 		return false;
 	}
 
-
 	@Override
 	public byte[] serializeKey(Tuple2<K, V> element) {
 		if (element.f0 == null) {
@@ -182,9 +179,8 @@ public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDe
 		return null; // we are never overriding the topic
 	}
 
-
 	@Override
-	public TypeInformation<Tuple2<K,V>> getProducedType() {
+	public TypeInformation<Tuple2<K, V>> getProducedType() {
 		if (typeInfo != null) {
 			return typeInfo;
 		}
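
A short usage sketch of the class-based constructor described in the javadoc above, doing a local serialize/deserialize round trip; the concrete values and the topic/partition/offset arguments are placeholders.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.util.serialization.TypeInformationKeyValueSerializationSchema;

public class KeyValueSchemaExample {

	public static void main(String[] args) throws Exception {
		// construct the schema from plain classes, as the constructor javadoc above describes
		TypeInformationKeyValueSerializationSchema<String, Long> schema =
				new TypeInformationKeyValueSerializationSchema<>(String.class, Long.class, new ExecutionConfig());

		Tuple2<String, Long> original = Tuple2.of("answer", 42L);

		byte[] key = schema.serializeKey(original);
		byte[] value = schema.serializeValue(original);

		// topic, partition and offset are placeholder values for this sketch
		Tuple2<String, Long> copy = schema.deserialize(key, value, "topic", 0, 0L);

		System.out.println(copy); // prints (answer,42)
	}
}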

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/AvroRowDeSerializationSchemaTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/AvroRowDeSerializationSchemaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/AvroRowDeSerializationSchemaTest.java
index e13968e..d5be274 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/AvroRowDeSerializationSchemaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/AvroRowDeSerializationSchemaTest.java
@@ -15,18 +15,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.io.IOException;
-import org.apache.avro.specific.SpecificRecord;
 import org.apache.flink.api.java.tuple.Tuple3;
 import org.apache.flink.streaming.connectors.kafka.testutils.AvroTestUtils;
 import org.apache.flink.streaming.util.serialization.AvroRowDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.AvroRowSerializationSchema;
 import org.apache.flink.types.Row;
-import static org.junit.Assert.assertEquals;
+
+import org.apache.avro.specific.SpecificRecord;
 import org.junit.Test;
 
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
 /**
  * Test for the Avro serialization and deserialization schema.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java
new file mode 100644
index 0000000..b62bdd5
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka;
+
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests for the {@link FlinkFixedPartitioner}.
+ */
+public class FlinkFixedPartitionerTest {
+
+	/**
+	 * Test for when there are more sinks than partitions.
+	 * <pre>
+	 *   		Flink Sinks:		Kafka Partitions
+	 * 			1	---------------->	1
+	 * 			2   --------------/
+	 * 			3   -------------/
+	 * 			4	------------/
+	 * </pre>
+	 */
+	@Test
+	public void testMoreFlinkThanBrokers() {
+		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
+
+		int[] partitions = new int[]{0};
+
+		part.open(0, 4);
+		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
+
+		part.open(1, 4);
+		Assert.assertEquals(0, part.partition("abc2", null, null, null, partitions));
+
+		part.open(2, 4);
+		Assert.assertEquals(0, part.partition("abc3", null, null, null, partitions));
+		Assert.assertEquals(0, part.partition("abc3", null, null, null, partitions)); // check if it is changing ;)
+
+		part.open(3, 4);
+		Assert.assertEquals(0, part.partition("abc4", null, null, null, partitions));
+	}
+
+	/**
+	 * Tests for when there are more partitions than sinks.
+	 * <pre>
+	 * 		Flink Sinks:		Kafka Partitions
+	 * 			1	---------------->	1
+	 * 			2	---------------->	2
+	 * 									3
+	 * 									4
+	 * 									5
+	 *
+	 * </pre>
+	 */
+	@Test
+	public void testFewerPartitions() {
+		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
+
+		int[] partitions = new int[]{0, 1, 2, 3, 4};
+		part.open(0, 2);
+		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
+		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
+
+		part.open(1, 2);
+		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
+		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
+	}
+
+	/*
+	 * 		Flink Sinks:		Kafka Partitions
+	 * 			1	------------>--->	1
+	 * 			2	-----------/----> 	2
+	 * 			3	----------/
+	 */
+	@Test
+	public void testMixedCase() {
+		FlinkFixedPartitioner<String> part = new FlinkFixedPartitioner<>();
+		int[] partitions = new int[]{0, 1};
+
+		part.open(0, 3);
+		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
+
+		part.open(1, 3);
+		Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions));
+
+		part.open(2, 3);
+		Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions));
+
+	}
+
+}
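
The assertions in this new test are consistent with mapping each sink subtask to partitions[subtaskIndex % partitions.length]; the standalone sketch below only illustrates that rule and is not the connector's code.

public class FixedPartitioningSketch {

	/** Maps a sink subtask to one fixed partition, matching the expectations asserted above. */
	static int targetPartition(int subtaskIndex, int[] partitions) {
		return partitions[subtaskIndex % partitions.length];
	}

	public static void main(String[] args) {
		int[] onePartition = {0};
		int[] fivePartitions = {0, 1, 2, 3, 4};

		// more sinks than partitions: every subtask collapses onto partition 0
		System.out.println(targetPartition(3, onePartition));   // 0

		// fewer sinks than partitions: subtask i sticks to partition i
		System.out.println(targetPartition(1, fivePartitions)); // 1
	}
}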

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom11MigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom11MigrationTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom11MigrationTest.java
index c07ebd5..77417ab 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom11MigrationTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom11MigrationTest.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.api.TimeCharacteristic;
@@ -29,6 +30,7 @@ import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition
 import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.SerializedValue;
+
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -36,9 +38,9 @@ import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Map;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import static org.mockito.Mockito.mock;
 
@@ -316,7 +318,6 @@ public class FlinkKafkaConsumerBaseFrom11MigrationTest {
 		}
 	}
 
-
 	// ------------------------------------------------------------------------
 
 	private interface FetcherFactory<T> extends Serializable {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom12MigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom12MigrationTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom12MigrationTest.java
index f11bf9f..f13cbe0 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom12MigrationTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseFrom12MigrationTest.java
@@ -15,19 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.kafka;
 
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 import org.apache.flink.core.testutils.OneShotLatch;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
@@ -45,12 +35,25 @@ import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.OperatorSnapshotUtil;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.SerializedValue;
+
 import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 /**
  * Tests for checking whether {@link FlinkKafkaConsumerBase} can restore from snapshots that were
  * done using the Flink 1.2 {@link FlinkKafkaConsumerBase}.
@@ -60,7 +63,7 @@ import org.mockito.stubbing.Answer;
  */
 public class FlinkKafkaConsumerBaseFrom12MigrationTest {
 
-	final static HashMap<KafkaTopicPartition, Long> PARTITION_STATE = new HashMap<>();
+	private static final HashMap<KafkaTopicPartition, Long> PARTITION_STATE = new HashMap<>();
 
 	static {
 		PARTITION_STATE.put(new KafkaTopicPartition("abc", 13), 16768L);
@@ -101,7 +104,6 @@ public class FlinkKafkaConsumerBaseFrom12MigrationTest {
 		StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
 				new StreamSource<>(consumerFunction);
 
-
 		final AbstractStreamOperatorTestHarness<String> testHarness =
 				new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);
 
@@ -307,8 +309,7 @@ public class FlinkKafkaConsumerBaseFrom12MigrationTest {
 		}
 	}
 
-
-	private static abstract class DummySourceContext
+	private abstract static class DummySourceContext
 			implements SourceFunction.SourceContext<String> {
 
 		private final Object lock = new Object();

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
index ccf2ed2..d673e8e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.commons.collections.map.LinkedMap;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.ListStateDescriptor;
 import org.apache.flink.api.common.state.OperatorStateStore;
@@ -35,6 +34,8 @@ import org.apache.flink.streaming.connectors.kafka.internals.AbstractFetcher;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.SerializedValue;
+
+import org.apache.commons.collections.map.LinkedMap;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Matchers;
@@ -61,6 +62,9 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+/**
+ * Tests for the {@link FlinkKafkaConsumerBase}.
+ */
 public class FlinkKafkaConsumerBaseTest {
 
 	/**
@@ -77,12 +81,12 @@ public class FlinkKafkaConsumerBaseTest {
 			new DummyFlinkKafkaConsumer<>().assignTimestampsAndWatermarks((AssignerWithPunctuatedWatermarks<Object>) null);
 			fail();
 		} catch (NullPointerException ignored) {}
-		
+
 		@SuppressWarnings("unchecked")
 		final AssignerWithPeriodicWatermarks<String> periodicAssigner = mock(AssignerWithPeriodicWatermarks.class);
 		@SuppressWarnings("unchecked")
 		final AssignerWithPunctuatedWatermarks<String> punctuatedAssigner = mock(AssignerWithPunctuatedWatermarks.class);
-		
+
 		DummyFlinkKafkaConsumer<String> c1 = new DummyFlinkKafkaConsumer<>();
 		c1.assignTimestampsAndWatermarks(periodicAssigner);
 		try {
@@ -189,7 +193,7 @@ public class FlinkKafkaConsumerBaseTest {
 	}
 
 	/**
-	 * Tests that on snapshots, states and offsets to commit to Kafka are correct
+	 * Tests that on snapshots, states and offsets to commit to Kafka are correct.
 	 */
 	@SuppressWarnings("unchecked")
 	@Test
@@ -301,7 +305,7 @@ public class FlinkKafkaConsumerBaseTest {
 		state3.put(new KafkaTopicPartition("def", 7), 987654377L);
 
 		// --------------------------------------------------------------------
-		
+
 		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
 		when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);
 
@@ -356,7 +360,7 @@ public class FlinkKafkaConsumerBaseTest {
 		assertEquals(state2, snapshot2);
 		assertEquals(2, pendingOffsetsToCommit.size());
 		assertEquals(state2, pendingOffsetsToCommit.get(140L));
-		
+
 		// ack checkpoint 1
 		consumer.notifyCheckpointComplete(138L);
 		assertEquals(1, pendingOffsetsToCommit.size());
@@ -375,12 +379,11 @@ public class FlinkKafkaConsumerBaseTest {
 		assertEquals(state3, snapshot3);
 		assertEquals(2, pendingOffsetsToCommit.size());
 		assertEquals(state3, pendingOffsetsToCommit.get(141L));
-		
+
 		// ack checkpoint 3, subsumes number 2
 		consumer.notifyCheckpointComplete(141L);
 		assertEquals(0, pendingOffsetsToCommit.size());
 
-
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
 		assertEquals(0, pendingOffsetsToCommit.size());
 
@@ -504,7 +507,6 @@ public class FlinkKafkaConsumerBaseTest {
 		consumer.notifyCheckpointComplete(141L);
 		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap()); // not offsets should be committed
 
-
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
 		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap()); // not offsets should be committed
 
@@ -535,8 +537,7 @@ public class FlinkKafkaConsumerBaseTest {
 	// ------------------------------------------------------------------------
 
 	private static <T> FlinkKafkaConsumerBase<T> getConsumer(
-			AbstractFetcher<T, ?> fetcher, LinkedMap pendingOffsetsToCommit, boolean running) throws Exception
-	{
+			AbstractFetcher<T, ?> fetcher, LinkedMap pendingOffsetsToCommit, boolean running) throws Exception {
 		FlinkKafkaConsumerBase<T> consumer = new DummyFlinkKafkaConsumer<>();
 		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
 		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
index 6b2cc02..08c5f01 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
@@ -15,21 +15,23 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.testutils.CheckedThread;
 import org.apache.flink.core.testutils.MultiShotLatch;
 import org.apache.flink.runtime.state.FunctionSnapshotContext;
 import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
+
 import org.apache.kafka.clients.producer.Callback;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
@@ -53,10 +55,13 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+/**
+ * Tests for the {@link FlinkKafkaProducerBase}.
+ */
 public class FlinkKafkaProducerBaseTest {
 
 	/**
-	 * Tests that the constructor eagerly checks bootstrap servers are set in config
+	 * Tests that the constructor eagerly checks bootstrap servers are set in config.
 	 */
 	@Test(expected = IllegalArgumentException.class)
 	public void testInstantiationFailsWhenBootstrapServersMissing() throws Exception {
@@ -67,7 +72,7 @@ public class FlinkKafkaProducerBaseTest {
 	}
 
 	/**
-	 * Tests that constructor defaults to key value serializers in config to byte array deserializers if not set
+	 * Tests that constructor defaults to key value serializers in config to byte array deserializers if not set.
 	 */
 	@Test
 	public void testKeyValueDeserializersSetIfMissing() throws Exception {
@@ -83,7 +88,7 @@ public class FlinkKafkaProducerBaseTest {
 	}
 
 	/**
-	 * Tests that partitions list is determinate and correctly provided to custom partitioner
+	 * Tests that partitions list is determinate and correctly provided to custom partitioner.
 	 */
 	@SuppressWarnings("unchecked")
 	@Test
@@ -93,7 +98,7 @@ public class FlinkKafkaProducerBaseTest {
 		RuntimeContext mockRuntimeContext = mock(RuntimeContext.class);
 		when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
 		when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);
-		
+
 		// out-of-order list of 4 partitions
 		List<PartitionInfo> mockPartitionsList = new ArrayList<>(4);
 		mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 3, null, null, null));
@@ -118,7 +123,7 @@ public class FlinkKafkaProducerBaseTest {
 	}
 
 	/**
-	 * Test ensuring that if an invoke call happens right after an async exception is caught, it should be rethrown
+	 * Test ensuring that if an invoke call happens right after an async exception is caught, it should be rethrown.
 	 */
 	@Test
 	public void testAsyncErrorRethrownOnInvoke() throws Throwable {
@@ -149,7 +154,7 @@ public class FlinkKafkaProducerBaseTest {
 	}
 
 	/**
-	 * Test ensuring that if a snapshot call happens right after an async exception is caught, it should be rethrown
+	 * Test ensuring that if a snapshot call happens right after an async exception is caught, it should be rethrown.
 	 */
 	@Test
 	public void testAsyncErrorRethrownOnCheckpoint() throws Throwable {
@@ -183,11 +188,11 @@ public class FlinkKafkaProducerBaseTest {
 	 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 	 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 	 *
-	 * Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
+	 * <p>Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
 	 * The test for that is covered in testAtLeastOnceProducer.
 	 */
 	@SuppressWarnings("unchecked")
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
 		final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
 			FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
@@ -237,10 +242,10 @@ public class FlinkKafkaProducerBaseTest {
 
 	/**
 	 * Test ensuring that the producer is not dropping buffered records;
-	 * we set a timeout because the test will not finish if the logic is broken
+	 * we set a timeout because the test will not finish if the logic is broken.
 	 */
 	@SuppressWarnings("unchecked")
-	@Test(timeout=10000)
+	@Test(timeout = 10000)
 	public void testAtLeastOnceProducer() throws Throwable {
 		final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
 			FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
@@ -297,10 +302,10 @@ public class FlinkKafkaProducerBaseTest {
 	/**
 	 * This test is meant to assure that testAtLeastOnceProducer is valid by testing that if flushing is disabled,
 	 * the snapshot method does indeed finishes without waiting for pending records;
-	 * we set a timeout because the test will not finish if the logic is broken
+	 * we set a timeout because the test will not finish if the logic is broken.
 	 */
 	@SuppressWarnings("unchecked")
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable {
 		final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
 			FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
@@ -328,8 +333,8 @@ public class FlinkKafkaProducerBaseTest {
 
 	private static class DummyFlinkKafkaProducer<T> extends FlinkKafkaProducerBase<T> {
 		private static final long serialVersionUID = 1L;
-		
-		private final static String DUMMY_TOPIC = "dummy-topic";
+
+		private static final String DUMMY_TOPIC = "dummy-topic";
 
 		private transient KafkaProducer<?, ?> mockProducer;
 		private transient List<Callback> pendingCallbacks;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONDeserializationSchemaTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONDeserializationSchemaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONDeserializationSchemaTest.java
index 1882a7e..51e483b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONDeserializationSchemaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONDeserializationSchemaTest.java
@@ -14,16 +14,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.streaming.util.serialization.JSONDeserializationSchema;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.flink.streaming.util.serialization.JSONDeserializationSchema;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
 
+/**
+ * Tests for the {@link JSONDeserializationSchema}.
+ */
 public class JSONDeserializationSchemaTest {
 	@Test
 	public void testDeserialize() throws IOException {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java
index 86d3105..565ef00 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java
@@ -14,16 +14,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.streaming.util.serialization.JSONKeyValueDeserializationSchema;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.flink.streaming.util.serialization.JSONKeyValueDeserializationSchema;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
 
+/**
+ * Tests for the{@link JSONKeyValueDeserializationSchema}.
+ */
 public class JSONKeyValueDeserializationSchemaTest {
 	@Test
 	public void testDeserializeWithoutMetadata() throws IOException {
@@ -39,7 +44,6 @@ public class JSONKeyValueDeserializationSchemaTest {
 		JSONKeyValueDeserializationSchema schema = new JSONKeyValueDeserializationSchema(false);
 		ObjectNode deserializedValue = schema.deserialize(serializedKey, serializedValue, "", 0, 0);
 
-
 		Assert.assertTrue(deserializedValue.get("metadata") == null);
 		Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt());
 		Assert.assertEquals("world", deserializedValue.get("value").get("word").asText());

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowDeserializationSchemaTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowDeserializationSchemaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowDeserializationSchemaTest.java
index f03feeb..186e364 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowDeserializationSchemaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowDeserializationSchemaTest.java
@@ -18,12 +18,13 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ObjectNode;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
 import org.apache.flink.table.api.Types;
 import org.apache.flink.types.Row;
-import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -35,6 +36,9 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * Tests for the {@link JsonRowDeserializationSchema}.
+ */
 public class JsonRowDeserializationSchemaTest {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowSerializationSchemaTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowSerializationSchemaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowSerializationSchemaTest.java
index 523eafe..43bde35 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowSerializationSchemaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/JsonRowSerializationSchemaTest.java
@@ -14,19 +14,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.table.api.Types;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.JsonRowSerializationSchema;
+import org.apache.flink.table.api.Types;
+import org.apache.flink.types.Row;
+
 import org.junit.Test;
 
 import java.io.IOException;
 
 import static org.junit.Assert.assertEquals;
 
+/**
+ * Tests for the {@link JsonRowSerializationSchema}.
+ */
 public class JsonRowSerializationSchemaTest {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerPartitionAssignmentTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerPartitionAssignmentTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerPartitionAssignmentTest.java
index c24640d..0be1d57 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerPartitionAssignmentTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerPartitionAssignmentTest.java
@@ -25,10 +25,10 @@ import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import static org.junit.Assert.assertEquals;


[11/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kafka*

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
index de201e5..659bbd7 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
@@ -18,22 +18,21 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
-import kafka.api.OffsetRequest;
-import kafka.common.TopicAndPartition;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.Node;
-
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext;
 import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
+import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.InstantiationUtil;
 import org.apache.flink.util.SerializedValue;
 
+import kafka.api.OffsetRequest;
+import kafka.common.TopicAndPartition;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.Node;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,39 +55,38 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * @param <T> The type of elements produced by the fetcher.
  */
 public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
-	
-	static final KafkaTopicPartitionState<TopicAndPartition> MARKER = 
+
+	static final KafkaTopicPartitionState<TopicAndPartition> MARKER =
 			new KafkaTopicPartitionState<>(new KafkaTopicPartition("n/a", -1), new TopicAndPartition("n/a", -1));
 
 	private static final Logger LOG = LoggerFactory.getLogger(Kafka08Fetcher.class);
 
 	// ------------------------------------------------------------------------
 
-	/** The schema to convert between Kafka's byte messages, and Flink's objects */
+	/** The schema to convert between Kafka's byte messages, and Flink's objects. */
 	private final KeyedDeserializationSchema<T> deserializer;
 
-	/** The properties that configure the Kafka connection */
+	/** The properties that configure the Kafka connection. */
 	private final Properties kafkaConfig;
 
-	/** The subtask's runtime context */
+	/** The subtask's runtime context. */
 	private final RuntimeContext runtimeContext;
 
-	/** The queue of partitions that are currently not assigned to a broker connection */
+	/** The queue of partitions that are currently not assigned to a broker connection. */
 	private final ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitionsQueue;
 
-	/** The behavior to use in case that an offset is not valid (any more) for a partition */
+	/** The behavior to use in case that an offset is not valid (any more) for a partition. */
 	private final long invalidOffsetBehavior;
 
-	/** The interval in which to automatically commit (-1 if deactivated) */
+	/** The interval in which to automatically commit (-1 if deactivated). */
 	private final long autoCommitInterval;
 
-	/** The handler that reads/writes offsets from/to ZooKeeper */
+	/** The handler that reads/writes offsets from/to ZooKeeper. */
 	private volatile ZookeeperOffsetHandler zookeeperOffsetHandler;
 
-	/** Flag to track the main work loop as alive */
+	/** Flag to track the main work loop as alive. */
 	private volatile boolean running = true;
 
-
 	public Kafka08Fetcher(
 			SourceContext<T> sourceContext,
 			Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
@@ -98,8 +96,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 			KeyedDeserializationSchema<T> deserializer,
 			Properties kafkaProperties,
 			long autoCommitInterval,
-			boolean useMetrics) throws Exception
-	{
+			boolean useMetrics) throws Exception {
 		super(
 				sourceContext,
 				assignedPartitionsWithInitialOffsets,
@@ -175,7 +172,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 			if (autoCommitInterval > 0) {
 				LOG.info("Starting periodic offset committer, with commit interval of {}ms", autoCommitInterval);
 
-				periodicCommitter = new PeriodicOffsetCommitter(zookeeperOffsetHandler, 
+				periodicCommitter = new PeriodicOffsetCommitter(zookeeperOffsetHandler,
 						subscribedPartitionStates(), errorHandler, autoCommitInterval);
 				periodicCommitter.setName("Periodic Kafka partition offset committer");
 				periodicCommitter.setDaemon(true);
@@ -196,19 +193,18 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 				// wait for max 5 seconds trying to get partitions to assign
 				// if threads shut down, this poll returns earlier, because the threads inject the
 				// special marker into the queue
-				List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToAssign = 
+				List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToAssign =
 						unassignedPartitionsQueue.getBatchBlocking(5000);
 				partitionsToAssign.remove(MARKER);
 
 				if (!partitionsToAssign.isEmpty()) {
 					LOG.info("Assigning {} partitions to broker threads", partitionsToAssign.size());
-					Map<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> partitionsWithLeaders = 
+					Map<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> partitionsWithLeaders =
 							findLeaderForPartitions(partitionsToAssign, kafkaConfig);
 
 					// assign the partitions to the leaders (maybe start the threads)
-					for (Map.Entry<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> partitionsWithLeader : 
-							partitionsWithLeaders.entrySet())
-					{
+					for (Map.Entry<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> partitionsWithLeader :
+							partitionsWithLeaders.entrySet()) {
 						final Node leader = partitionsWithLeader.getKey();
 						final List<KafkaTopicPartitionState<TopicAndPartition>> partitions = partitionsWithLeader.getValue();
 						SimpleConsumerThread<T> brokerThread = brokerToThread.get(leader);
@@ -224,9 +220,9 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 						}
 						else {
 							// put elements into queue of thread
-							ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> newPartitionsQueue = 
+							ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> newPartitionsQueue =
 									brokerThread.getNewPartitionsQueue();
-							
+
 							for (KafkaTopicPartitionState<TopicAndPartition> fp : partitions) {
 								if (!newPartitionsQueue.addIfOpen(fp)) {
 									// we were unable to add the partition to the broker's queue
@@ -389,8 +385,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 	private SimpleConsumerThread<T> createAndStartSimpleConsumerThread(
 			List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions,
 			Node leader,
-			ExceptionProxy errorHandler) throws IOException, ClassNotFoundException
-	{
+			ExceptionProxy errorHandler) throws IOException, ClassNotFoundException {
 		// each thread needs its own copy of the deserializer, because the deserializer is
 		// not necessarily thread safe
 		final KeyedDeserializationSchema<T> clonedDeserializer =
@@ -398,7 +393,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 
 		// seed thread with list of fetch partitions (otherwise it would shut down immediately again
 		SimpleConsumerThread<T> brokerThread = new SimpleConsumerThread<>(
-				this, errorHandler, kafkaConfig, leader, seedPartitions, unassignedPartitionsQueue, 
+				this, errorHandler, kafkaConfig, leader, seedPartitions, unassignedPartitionsQueue,
 				clonedDeserializer, invalidOffsetBehavior);
 
 		brokerThread.setName(String.format("SimpleConsumer - %s - broker-%s (%s:%d)",
@@ -411,7 +406,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 	}
 
 	/**
-	 * Returns a list of unique topics from for the given partitions
+	 * Returns a list of unique topics from for the given partitions.
 	 *
 	 * @param partitions A the partitions
 	 * @return A list of unique topics
@@ -425,26 +420,25 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 	}
 
 	/**
-	 * Find leaders for the partitions
+	 * Find leaders for the partitions.
 	 *
-	 * From a high level, the method does the following:
+	 * <p>From a high level, the method does the following:
 	 *	 - Get a list of FetchPartitions (usually only a few partitions)
 	 *	 - Get the list of topics from the FetchPartitions list and request the partitions for the topics. (Kafka doesn't support getting leaders for a set of partitions)
-	 *	 - Build a Map<Leader, List<FetchPartition>> where only the requested partitions are contained.
+	 *	 - Build a Map&lt;Leader, List&lt;FetchPartition&gt;&gt; where only the requested partitions are contained.
 	 *
 	 * @param partitionsToAssign fetch partitions list
 	 * @return leader to partitions map
 	 */
 	private static Map<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> findLeaderForPartitions(
 			List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToAssign,
-			Properties kafkaProperties) throws Exception
-	{
+			Properties kafkaProperties) throws Exception {
 		if (partitionsToAssign.isEmpty()) {
 			throw new IllegalArgumentException("Leader request for empty partitions list");
 		}
 
 		LOG.info("Refreshing leader information for partitions {}", partitionsToAssign);
-		
+
 		// this request is based on the topic names
 		PartitionInfoFetcher infoFetcher = new PartitionInfoFetcher(getTopics(partitionsToAssign), kafkaProperties);
 		infoFetcher.start();
@@ -465,7 +459,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 		// final mapping from leader -> list(fetchPartition)
 		Map<Node, List<KafkaTopicPartitionState<TopicAndPartition>>> leaderToPartitions = new HashMap<>();
 
-		for(KafkaTopicPartitionLeader partitionLeader: topicPartitionWithLeaderList) {
+		for (KafkaTopicPartitionLeader partitionLeader: topicPartitionWithLeaderList) {
 			if (unassignedPartitions.size() == 0) {
 				// we are done: all partitions are assigned
 				break;
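
The findLeaderForPartitions javadoc above describes grouping the requested partitions under their leader broker. A generic sketch of that grouping step, using plain strings as stand-ins for the Node and partition-state types; names and sample data are hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByLeaderSketch {

	/** Groups each requested partition under its leader, skipping partitions with no known leader. */
	static Map<String, List<String>> groupByLeader(Map<String, String> partitionToLeader, List<String> requested) {
		Map<String, List<String>> leaderToPartitions = new HashMap<>();
		for (String partition : requested) {
			String leader = partitionToLeader.get(partition);
			if (leader == null) {
				continue; // no leader known for this partition
			}
			leaderToPartitions.computeIfAbsent(leader, k -> new ArrayList<>()).add(partition);
		}
		return leaderToPartitions;
	}

	public static void main(String[] args) {
		Map<String, String> leaders = new HashMap<>();
		leaders.put("topicA-0", "broker-1");
		leaders.put("topicA-1", "broker-2");
		leaders.put("topicB-0", "broker-1");

		List<String> requested = Arrays.asList("topicA-0", "topicB-0");
		System.out.println(groupByLeader(leaders, requested)); // {broker-1=[topicA-0, topicB-0]}
	}
}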

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
index 574d9f7..b5998f4 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
@@ -21,7 +21,7 @@ package org.apache.flink.streaming.connectors.kafka.internals;
 /**
  * A watch dog thread that forcibly kills another thread, if that thread does not
  * finish in time.
- * 
+ *
  * <p>This uses the discouraged {@link Thread#stop()} method. While this is not
  * advisable, this watch dog is only for extreme cases of thread that simply
  * to not terminate otherwise.

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
index d8d927d..ecf1378 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
@@ -31,7 +31,6 @@ class PartitionInfoFetcher extends Thread {
 	private volatile List<KafkaTopicPartitionLeader> result;
 	private volatile Throwable error;
 
-
 	PartitionInfoFetcher(List<String> topics, Properties properties) {
 		this.topics = topics;
 		this.properties = properties;
@@ -63,4 +62,4 @@ class PartitionInfoFetcher extends Thread {
 		}
 		throw new Exception("Partition fetching failed");
 	}
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
index 27d90f2..13a926f 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
@@ -28,30 +28,29 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  */
 public class PeriodicOffsetCommitter extends Thread {
 
-	/** The ZooKeeper handler */
+	/** The ZooKeeper handler. */
 	private final ZookeeperOffsetHandler offsetHandler;
-	
+
 	private final KafkaTopicPartitionState<?>[] partitionStates;
-	
-	/** The proxy to forward exceptions to the main thread */
+
+	/** The proxy to forward exceptions to the main thread. */
 	private final ExceptionProxy errorHandler;
-	
-	/** Interval in which to commit, in milliseconds */
+
+	/** Interval in which to commit, in milliseconds. */
 	private final long commitInterval;
-	
-	/** Flag to mark the periodic committer as running */
+
+	/** Flag to mark the periodic committer as running. */
 	private volatile boolean running = true;
 
 	PeriodicOffsetCommitter(ZookeeperOffsetHandler offsetHandler,
 			KafkaTopicPartitionState<?>[] partitionStates,
 			ExceptionProxy errorHandler,
-			long commitInterval)
-	{
+			long commitInterval) {
 		this.offsetHandler = checkNotNull(offsetHandler);
 		this.partitionStates = checkNotNull(partitionStates);
 		this.errorHandler = checkNotNull(errorHandler);
 		this.commitInterval = commitInterval;
-		
+
 		checkArgument(commitInterval > 0);
 	}
 
@@ -66,7 +65,7 @@ public class PeriodicOffsetCommitter extends Thread {
 				for (KafkaTopicPartitionState<?> partitionState : partitionStates) {
 					offsetsToCommit.put(partitionState.getKafkaTopicPartition(), partitionState.getOffset());
 				}
-				
+
 				offsetHandler.prepareAndCommitOffsets(offsetsToCommit);
 			}
 		}
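The hunk above shows the snapshot-and-commit step of the run loop. A self-contained sketch of the surrounding loop, using a hypothetical Committer callback in place of the ZookeeperOffsetHandler:

import java.util.HashMap;
import java.util.Map;

// Sketch of a periodic committer loop. The Committer callback is hypothetical and stands
// in for the ZookeeperOffsetHandler; error forwarding to the owning thread is simplified.
public class PeriodicCommitterSketch extends Thread {

	public interface Committer {
		void commit(Map<String, Long> offsets) throws Exception;
	}

	private final Committer committer;
	private final Map<String, Long> currentOffsets = new HashMap<>();
	private final long commitInterval;
	private volatile boolean running = true;

	public PeriodicCommitterSketch(Committer committer, long commitInterval) {
		this.committer = committer;
		this.commitInterval = commitInterval;
	}

	public void reportOffset(String partition, long offset) {
		synchronized (currentOffsets) {
			currentOffsets.put(partition, offset);
		}
	}

	@Override
	public void run() {
		try {
			while (running) {
				Thread.sleep(commitInterval);

				// snapshot the offsets so the commit sees a consistent view
				Map<String, Long> snapshot;
				synchronized (currentOffsets) {
					snapshot = new HashMap<>(currentOffsets);
				}
				committer.commit(snapshot);
			}
		} catch (InterruptedException e) {
			// shutdown() interrupts the sleep; nothing more to do
		} catch (Throwable t) {
			// the real class hands this to an ExceptionProxy on the owning thread
			t.printStackTrace();
		}
	}

	public void shutdown() {
		running = false;
		this.interrupt();
	}

	public static void main(String[] args) throws Exception {
		PeriodicCommitterSketch committer = new PeriodicCommitterSketch(
				new Committer() {
					@Override
					public void commit(Map<String, Long> offsets) {
						System.out.println("committing " + offsets);
					}
				}, 1000L);
		committer.start();
		committer.reportOffset("my-topic-0", 42L);
		Thread.sleep(2500L);
		committer.shutdown();
	}
}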

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
index c78c085..abc61fa 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
@@ -18,6 +18,9 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
+import org.apache.flink.util.ExceptionUtils;
+
 import kafka.api.FetchRequestBuilder;
 import kafka.api.OffsetRequest;
 import kafka.api.PartitionOffsetRequestInfo;
@@ -28,12 +31,7 @@ import kafka.javaapi.OffsetResponse;
 import kafka.javaapi.consumer.SimpleConsumer;
 import kafka.javaapi.message.ByteBufferMessageSet;
 import kafka.message.MessageAndOffset;
-
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.util.ExceptionUtils;
-
 import org.apache.kafka.common.Node;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,8 +51,8 @@ import static org.apache.flink.util.PropertiesUtil.getInt;
 /**
  * This class implements a thread with a connection to a single Kafka broker. The thread
  * pulls records for a set of topic partitions for which the connected broker is currently
- * the leader. The thread deserializes these records and emits them. 
- * 
+ * the leader. The thread deserializes these records and emits them.
+ *
  * @param <T> The type of elements that this consumer thread creates from Kafka's byte messages
  *            and emits into the Flink DataStream.
  */
@@ -63,28 +61,27 @@ class SimpleConsumerThread<T> extends Thread {
 	private static final Logger LOG = LoggerFactory.getLogger(SimpleConsumerThread.class);
 
 	private static final KafkaTopicPartitionState<TopicAndPartition> MARKER = Kafka08Fetcher.MARKER;
-	
+
 	// ------------------------------------------------------------------------
 
 	private final Kafka08Fetcher<T> owner;
-	
+
 	private final KeyedDeserializationSchema<T> deserializer;
 
 	private final List<KafkaTopicPartitionState<TopicAndPartition>> partitions;
 
 	private final Node broker;
 
-	/** Queue containing new fetch partitions for the consumer thread */
+	/** Queue containing new fetch partitions for the consumer thread. */
 	private final ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> newPartitionsQueue;
-	
+
 	private final ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions;
-	
+
 	private final ExceptionProxy errorHandler;
-	
+
 	private final long invalidOffsetBehavior;
-	
+
 	private volatile boolean running = true;
-	
 
 	// ----------------- Simple Consumer ----------------------
 	private volatile SimpleConsumer consumer;
@@ -96,7 +93,6 @@ class SimpleConsumerThread<T> extends Thread {
 	private final int bufferSize;
 	private final int reconnectLimit;
 
-
 	// exceptions are thrown locally
 	public SimpleConsumerThread(
 			Kafka08Fetcher<T> owner,
@@ -106,8 +102,7 @@ class SimpleConsumerThread<T> extends Thread {
 			List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions,
 			ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions,
 			KeyedDeserializationSchema<T> deserializer,
-			long invalidOffsetBehavior)
-	{
+			long invalidOffsetBehavior) {
 		this.owner = owner;
 		this.errorHandler = errorHandler;
 		this.broker = broker;
@@ -118,7 +113,7 @@ class SimpleConsumerThread<T> extends Thread {
 		this.unassignedPartitions = requireNonNull(unassignedPartitions);
 		this.newPartitionsQueue = new ClosableBlockingQueue<>();
 		this.invalidOffsetBehavior = invalidOffsetBehavior;
-		
+
 		// these are the actual configuration values of Kafka + their original default values.
 		this.soTimeout = getInt(config, "socket.timeout.ms", 30000);
 		this.minBytes = getInt(config, "fetch.min.bytes", 1);
@@ -131,11 +126,11 @@ class SimpleConsumerThread<T> extends Thread {
 	public ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> getNewPartitionsQueue() {
 		return newPartitionsQueue;
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  main work loop
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public void run() {
 		LOG.info("Starting to fetch from {}", this.partitions);
@@ -146,7 +141,7 @@ class SimpleConsumerThread<T> extends Thread {
 		try {
 			// create the Kafka consumer that we actually use for fetching
 			consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId);
-			
+
 			// replace earliest of latest starting offsets with actual offset values fetched from Kafka
 			requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, partitions);
 
@@ -169,16 +164,16 @@ class SimpleConsumerThread<T> extends Thread {
 					// if the new partitions are to start from earliest or latest offsets,
 					// we need to replace them with actual values from Kafka
 					requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, newPartitions);
-					
+
 					// add the new partitions (and check they are not already in there)
 					for (KafkaTopicPartitionState<TopicAndPartition> newPartition: newPartitions) {
 						if (partitions.contains(newPartition)) {
-							throw new IllegalStateException("Adding partition " + newPartition + 
+							throw new IllegalStateException("Adding partition " + newPartition +
 									" to subscribed partitions even though it is already subscribed");
 						}
 						partitions.add(newPartition);
 					}
-					
+
 					LOG.info("Adding {} new partitions to consumer thread {}", newPartitions.size(), getName());
 					LOG.debug("Partitions list: {}", newPartitions);
 				}
@@ -187,8 +182,8 @@ class SimpleConsumerThread<T> extends Thread {
 					if (newPartitionsQueue.close()) {
 						// close succeeded. Closing thread
 						running = false;
-						
-						LOG.info("Consumer thread {} does not have any partitions assigned anymore. Stopping thread.", 
+
+						LOG.info("Consumer thread {} does not have any partitions assigned anymore. Stopping thread.",
 								getName());
 
 						// add the wake-up marker into the queue to make the main thread
@@ -199,7 +194,7 @@ class SimpleConsumerThread<T> extends Thread {
 					} else {
 						// close failed: fetcher main thread concurrently added new partitions into the queue.
 						// go to top of loop again and get the new partitions
-						continue; 
+						continue;
 					}
 				}
 
@@ -217,7 +212,7 @@ class SimpleConsumerThread<T> extends Thread {
 							partition.getOffset() + 1, // request the next record
 							fetchSize);
 				}
-				
+
 				kafka.api.FetchRequest fetchRequest = frb.build();
 				LOG.debug("Issuing fetch request {}", fetchRequest);
 
@@ -230,7 +225,7 @@ class SimpleConsumerThread<T> extends Thread {
 					if (cce instanceof ClosedChannelException) {
 						LOG.warn("Fetch failed because of ClosedChannelException.");
 						LOG.debug("Full exception", cce);
-						
+
 						// we don't know if the broker is overloaded or unavailable.
 						// retry a few times, then return ALL partitions for new leader lookup
 						if (++reconnects >= reconnectLimit) {
@@ -261,15 +256,15 @@ class SimpleConsumerThread<T> extends Thread {
 				if (fetchResponse == null) {
 					throw new IOException("Fetch from Kafka failed (request returned null)");
 				}
-				
+
 				if (fetchResponse.hasError()) {
 					String exception = "";
 					List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToGetOffsetsFor = new ArrayList<>();
-					
+
 					// iterate over partitions to get individual error codes
 					Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
 					boolean partitionsRemoved = false;
-					
+
 					while (partitionsIterator.hasNext()) {
 						final KafkaTopicPartitionState<TopicAndPartition> fp = partitionsIterator.next();
 						short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition());
@@ -282,8 +277,7 @@ class SimpleConsumerThread<T> extends Thread {
 						else if (code == ErrorMapping.NotLeaderForPartitionCode() ||
 								code == ErrorMapping.LeaderNotAvailableCode() ||
 								code == ErrorMapping.BrokerNotAvailableCode() ||
-								code == ErrorMapping.UnknownCode())
-						{
+								code == ErrorMapping.UnknownCode()) {
 							// the broker we are connected to is not the leader for the partition.
 							LOG.warn("{} is not the leader of {}. Reassigning leader for partition", broker, fp);
 							LOG.debug("Error code = {}", code);
@@ -294,7 +288,7 @@ class SimpleConsumerThread<T> extends Thread {
 							partitionsRemoved = true;
 						}
 						else if (code != ErrorMapping.NoError()) {
-							exception += "\nException for " + fp.getTopic() +":"+ fp.getPartition() + ": " +
+							exception += "\nException for " + fp.getTopic() + ":" + fp.getPartition() + ": " +
 									ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code));
 						}
 					}
@@ -307,7 +301,7 @@ class SimpleConsumerThread<T> extends Thread {
 						// get valid offsets for these partitions and try again.
 						LOG.warn("The following partitions had an invalid offset: {}", partitionsToGetOffsetsFor);
 						requestAndSetSpecificTimeOffsetsFromKafka(consumer, partitionsToGetOffsetsFor, invalidOffsetBehavior);
-						
+
 						LOG.warn("The new partition offsets are {}", partitionsToGetOffsetsFor);
 						continue; // jump back to create a new fetch request. The offset has not been touched.
 					}
@@ -316,7 +310,7 @@ class SimpleConsumerThread<T> extends Thread {
 					}
 					else {
 						// partitions failed on an error
-						throw new IOException("Error while fetching from broker '" + broker +"': " + exception);
+						throw new IOException("Error while fetching from broker '" + broker + "': " + exception);
 					}
 				} else {
 					// successful fetch, reset offsetOutOfRangeCount.
@@ -328,11 +322,11 @@ class SimpleConsumerThread<T> extends Thread {
 				int messagesInFetch = 0;
 				int deletedMessages = 0;
 				Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
-				
+
 				partitionsLoop:
 				while (partitionsIterator.hasNext()) {
 					final KafkaTopicPartitionState<TopicAndPartition> currentPartition = partitionsIterator.next();
-					
+
 					final ByteBufferMessageSet messageSet = fetchResponse.messageSet(
 							currentPartition.getTopic(), currentPartition.getPartition());
 
@@ -341,7 +335,7 @@ class SimpleConsumerThread<T> extends Thread {
 							messagesInFetch++;
 							final ByteBuffer payload = msg.message().payload();
 							final long offset = msg.offset();
-							
+
 							if (offset <= currentPartition.getOffset()) {
 								// we have seen this message already
 								LOG.info("Skipping message with offset " + msg.offset()
@@ -373,15 +367,15 @@ class SimpleConsumerThread<T> extends Thread {
 								keyPayload.get(keyBytes);
 							}
 
-							final T value = deserializer.deserialize(keyBytes, valueBytes, 
+							final T value = deserializer.deserialize(keyBytes, valueBytes,
 									currentPartition.getTopic(), currentPartition.getPartition(), offset);
-							
+
 							if (deserializer.isEndOfStream(value)) {
 								// remove partition from subscribed partitions.
 								partitionsIterator.remove();
 								continue partitionsLoop;
 							}
-							
+
 							owner.emitRecord(value, currentPartition, offset);
 						}
 						else {
@@ -427,7 +421,7 @@ class SimpleConsumerThread<T> extends Thread {
 
 		this.interrupt();
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Kafka Request Utils
 	// ------------------------------------------------------------------------
@@ -442,8 +436,7 @@ class SimpleConsumerThread<T> extends Thread {
 	private static void requestAndSetSpecificTimeOffsetsFromKafka(
 			SimpleConsumer consumer,
 			List<KafkaTopicPartitionState<TopicAndPartition>> partitions,
-			long whichTime) throws IOException
-	{
+			long whichTime) throws IOException {
 		Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
 		for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
 			requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(whichTime, 1));
@@ -461,8 +454,7 @@ class SimpleConsumerThread<T> extends Thread {
 	 */
 	private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
 			SimpleConsumer consumer,
-			List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception
-	{
+			List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
 		Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
 		for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
 			if (part.getOffset() == OffsetRequest.EarliestTime() || part.getOffset() == OffsetRequest.LatestTime()) {
@@ -486,8 +478,7 @@ class SimpleConsumerThread<T> extends Thread {
 	private static void requestAndSetOffsetsFromKafka(
 			SimpleConsumer consumer,
 			List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates,
-			Map<TopicAndPartition, PartitionOffsetRequestInfo> partitionToRequestInfo) throws IOException
-	{
+			Map<TopicAndPartition, PartitionOffsetRequestInfo> partitionToRequestInfo) throws IOException {
 		int retries = 0;
 		OffsetResponse response;
 		while (true) {
@@ -529,8 +520,7 @@ class SimpleConsumerThread<T> extends Thread {
 	}
 
 	private static void checkAllPartitionsHaveDefinedStartingOffsets(
-		List<KafkaTopicPartitionState<TopicAndPartition>> partitions)
-	{
+		List<KafkaTopicPartitionState<TopicAndPartition>> partitions) {
 		for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
 			if (!part.isOffsetDefined()) {
 				throw new IllegalArgumentException("SimpleConsumerThread received a partition with undefined starting offset");
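One detail worth calling out from the fetch loop above is the end-of-stream handling: when the deserializer flags a record, the partition is dropped from the subscription through the iterator and a labeled continue jumps to the next partition. A self-contained illustration with stand-in fetch and deserializer calls:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Illustration of the iterator-remove plus labeled-continue pattern used for end-of-stream.
public class EndOfStreamSketch {

	public static void main(String[] args) {
		List<String> partitions = new ArrayList<>(Arrays.asList("p0", "p1", "p2"));

		partitionsLoop:
		for (Iterator<String> it = partitions.iterator(); it.hasNext();) {
			String partition = it.next();
			for (String record : fetch(partition)) {
				if (isEndOfStream(record)) {
					it.remove();              // unsubscribe from this partition
					continue partitionsLoop;  // and move on to the next one
				}
				System.out.println(partition + " -> " + record);
			}
		}

		System.out.println("still subscribed: " + partitions);
	}

	// stand-ins for the real fetch and deserializer calls of the consumer thread
	private static List<String> fetch(String partition) {
		return "p1".equals(partition) ? Arrays.asList("a", "END") : Arrays.asList("a", "b");
	}

	private static boolean isEndOfStream(String record) {
		return "END".equals(record);
	}
}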

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
index c02c2cb..b6822e2 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
@@ -18,15 +18,14 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
-import kafka.utils.ZKGroupTopicDirs;
+import org.apache.flink.configuration.ConfigConstants;
 
+import kafka.utils.ZKGroupTopicDirs;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.flink.configuration.ConfigConstants;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,14 +44,13 @@ public class ZookeeperOffsetHandler {
 
 	private final CuratorFramework curatorClient;
 
-
 	public ZookeeperOffsetHandler(Properties props) {
 		this.groupId = props.getProperty(ConsumerConfig.GROUP_ID_CONFIG);
 		if (this.groupId == null) {
 			throw new IllegalArgumentException("Required property '"
 					+ ConsumerConfig.GROUP_ID_CONFIG + "' has not been set");
 		}
-		
+
 		String zkConnect = props.getProperty("zookeeper.connect");
 		if (zkConnect == null) {
 			throw new IllegalArgumentException("Required property 'zookeeper.connect' has not been set");
@@ -61,16 +59,16 @@ public class ZookeeperOffsetHandler {
 		// we use Curator's default timeouts
 		int sessionTimeoutMs =  Integer.valueOf(props.getProperty("zookeeper.session.timeout.ms", "60000"));
 		int connectionTimeoutMs = Integer.valueOf(props.getProperty("zookeeper.connection.timeout.ms", "15000"));
-		
+
 		// undocumented config options allowing users to configure the retry policy. (they are "flink." prefixed as they are not official Kafka configs)
 		int backoffBaseSleepTime = Integer.valueOf(props.getProperty("flink.zookeeper.base-sleep-time.ms", "100"));
 		int backoffMaxRetries =  Integer.valueOf(props.getProperty("flink.zookeeper.max-retries", "10"));
-		
+
 		RetryPolicy retryPolicy = new ExponentialBackoffRetry(backoffBaseSleepTime, backoffMaxRetries);
 		curatorClient = CuratorFrameworkFactory.newClient(zkConnect, sessionTimeoutMs, connectionTimeoutMs, retryPolicy);
 		curatorClient.start();
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Offset access and manipulation
 	// ------------------------------------------------------------------------
@@ -79,7 +77,7 @@ public class ZookeeperOffsetHandler {
 	 * Commits offsets for Kafka partitions to ZooKeeper. The given offsets to this method should be the offsets of
 	 * the last processed records; this method will take care of incrementing the offsets by 1 before committing them so
 	 * that the committed offsets to Zookeeper represent the next record to process.
-	 * 
+	 *
 	 * @param internalOffsets The internal offsets (representing last processed records) for the partitions to commit.
 	 * @throws Exception The method forwards exceptions.
 	 */
@@ -105,7 +103,7 @@ public class ZookeeperOffsetHandler {
 
 	/**
 	 * Closes the offset handler.
-	 * 
+	 *
 	 * @throws IOException Thrown, if the handler cannot be closed properly.
 	 */
 	public void close() throws IOException {
@@ -115,7 +113,7 @@ public class ZookeeperOffsetHandler {
 	// ------------------------------------------------------------------------
 	//  Communication with Zookeeper
 	// ------------------------------------------------------------------------
-	
+
 	public static void setOffsetInZooKeeper(CuratorFramework curatorClient, String groupId, String topic, int partition, long offset) throws Exception {
 		ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, topic);
 		String path = topicDirs.consumerOffsetDir() + "/" + partition;
@@ -128,9 +126,9 @@ public class ZookeeperOffsetHandler {
 		ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, topic);
 		String path = topicDirs.consumerOffsetDir() + "/" + partition;
 		curatorClient.newNamespaceAwareEnsurePath(path).ensure(curatorClient.getZookeeperClient());
-		
+
 		byte[] data = curatorClient.getData().forPath(path);
-		
+
 		if (data == null) {
 			return null;
 		} else {
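The handler builds a Curator client with an exponential back-off retry policy and reads and writes offsets under the consumer group's ZooKeeper path. A sketch of that usage; the connection string, timeouts, and node path below are placeholders rather than values from this commit:

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

// Sketch of the Curator calls seen above: create a client with exponential back-off,
// ensure the offset node exists, then write and read it. Values are placeholders.
public class CuratorOffsetSketch {

	public static void main(String[] args) throws Exception {
		RetryPolicy retryPolicy = new ExponentialBackoffRetry(100, 10);
		CuratorFramework client = CuratorFrameworkFactory.newClient(
				"localhost:2181", 60000, 15000, retryPolicy);
		client.start();

		String path = "/consumers/my-group/offsets/my-topic/0";

		// make sure the node exists, then write and read the offset
		client.newNamespaceAwareEnsurePath(path).ensure(client.getZookeeperClient());
		client.setData().forPath(path, Long.toString(42L).getBytes("UTF-8"));

		byte[] data = client.getData().forPath(path);
		System.out.println("committed offset: " + new String(data, "UTF-8"));

		client.close();
	}
}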

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSourceTest.java
index 2dedecb..a704c2f 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.util.serialization.AvroRowDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka08AvroTableSource}.
+ */
 public class Kafka08AvroTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
index 8cc735d..20dc6b7 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
@@ -17,12 +17,12 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.curator.framework.CuratorFramework;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.ZookeeperOffsetHandler;
 
+import org.apache.curator.framework.CuratorFramework;
 import org.junit.Test;
 
 import java.util.Properties;
@@ -31,6 +31,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * IT cases for Kafka 0.8 .
+ */
 public class Kafka08ITCase extends KafkaConsumerTestBase {
 
 	// ------------------------------------------------------------------------
@@ -42,7 +45,6 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 		runFailOnNoBrokerTest();
 	}
 
-
 	@Test(timeout = 60000)
 	public void testConcurrentProducerConsumerTopology() throws Exception {
 		runSimpleConcurrentProducerConsumerTopology();
@@ -79,7 +81,7 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 		final String topic = writeSequence("invalidOffsetTopic", 20, parallelism, 1);
 
 		// set invalid offset:
-		CuratorFramework curatorClient = ((KafkaTestEnvironmentImpl)kafkaServer).createCuratorClient();
+		CuratorFramework curatorClient = ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
 		ZookeeperOffsetHandler.setOffsetInZooKeeper(curatorClient, standardProps.getProperty("group.id"), topic, 0, 1234);
 		curatorClient.close();
 
@@ -166,7 +168,7 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 
 			final Long offset = (long) (Math.random() * Long.MAX_VALUE);
 
-			CuratorFramework curatorFramework = ((KafkaTestEnvironmentImpl)kafkaServer ).createCuratorClient();
+			CuratorFramework curatorFramework = ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
 			kafkaServer.createTestTopic(topicName, 3, 2);
 
 			ZookeeperOffsetHandler.setOffsetInZooKeeper(curatorFramework, groupId, topicName, 0, offset);
@@ -211,7 +213,7 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 		readSequence(env, StartupMode.GROUP_OFFSETS, null, readProps, parallelism, topicName, 100, 0);
 
 		// get the offset
-		CuratorFramework curatorFramework = ((KafkaTestEnvironmentImpl)kafkaServer).createCuratorClient();
+		CuratorFramework curatorFramework = ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
 
 		Long o1 = ZookeeperOffsetHandler.getOffsetFromZooKeeper(curatorFramework, standardProps.getProperty("group.id"), topicName, 0);
 		Long o2 = ZookeeperOffsetHandler.getOffsetFromZooKeeper(curatorFramework, standardProps.getProperty("group.id"), topicName, 1);
@@ -223,7 +225,7 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 		boolean atLeastOneOffsetSet = (o1 != null && o1 > 0 && o1 <= 100) ||
 			(o2 != null && o2 > 0 && o2 <= 100) ||
 			(o3 != null && o3 > 0 && o3 <= 100);
-		assertTrue("Expecting at least one offset to be set o1="+o1+" o2="+o2+" o3="+o3, atLeastOneOffsetSet);
+		assertTrue("Expecting at least one offset to be set o1=" + o1 + " o2=" + o2 + " o3=" + o3, atLeastOneOffsetSet);
 
 		deleteTestTopic(topicName);
 	}
@@ -245,7 +247,7 @@ public class Kafka08ITCase extends KafkaConsumerTestBase {
 		runAllDeletesTest();
 	}
 
-	@Test(timeout=60000)
+	@Test(timeout = 60000)
 	public void testEndOfStream() throws Exception {
 		runEndOfStreamTest();
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
index 2136476..ac92c8a 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
@@ -15,15 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.util.serialization.JsonRowSerializationSchema;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 
+/**
+ * Tests for the {@link Kafka08JsonTableSink}.
+ */
 public class Kafka08JsonTableSinkTest extends KafkaTableSinkTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSourceTest.java
index 27faff4..adcd3a2 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
+import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka08JsonTableSource}.
+ */
 public class Kafka08JsonTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ProducerITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ProducerITCase.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ProducerITCase.java
index 5c951db..8074765 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ProducerITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ProducerITCase.java
@@ -18,9 +18,11 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-
 import org.junit.Test;
 
+/**
+ * IT cases for the {@link FlinkKafkaProducer08}.
+ */
 @SuppressWarnings("serial")
 public class Kafka08ProducerITCase extends KafkaProducerTestBase {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumer08Test.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumer08Test.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumer08Test.java
index 83cdd90..a43609a 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumer08Test.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumer08Test.java
@@ -18,34 +18,11 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.powermock.api.mockito.PowerMockito.when;
-
-import java.net.InetAddress;
-import java.net.URL;
-import java.net.UnknownHostException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
-import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
-import org.apache.flink.streaming.connectors.kafka.config.OffsetCommitMode;
-import org.apache.flink.streaming.connectors.kafka.internals.AbstractFetcher;
-import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.streaming.util.serialization.DeserializationSchema;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import org.apache.flink.util.NetUtils;
-import org.apache.flink.util.SerializedValue;
+
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -56,6 +33,20 @@ import org.powermock.core.classloader.annotations.PowerMockIgnore;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
+import java.net.InetAddress;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Properties;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.powermock.api.mockito.PowerMockito.when;
+
+/**
+ * Tests for the {@link FlinkKafkaConsumer08}.
+ */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(FlinkKafkaConsumer08.class)
 @PowerMockIgnore("javax.management.*")
@@ -101,7 +92,7 @@ public class KafkaConsumer08Test {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testCreateSourceWithoutCluster() {
 		try {
@@ -181,7 +172,7 @@ public class KafkaConsumer08Test {
 							+ " config are invalid"));
 		}
 	}
-	
+
 	private Properties createKafkaProps(String zookeeperConnect, String bootstrapServers, String groupId) {
 		Properties props = new Properties();
 		props.setProperty("zookeeper.connect", zookeeperConnect);

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaLocalSystemTime.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaLocalSystemTime.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaLocalSystemTime.java
deleted file mode 100644
index 72d2772..0000000
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaLocalSystemTime.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.kafka;
-
-import kafka.utils.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class KafkaLocalSystemTime implements Time {
-
-	private static final Logger LOG = LoggerFactory.getLogger(KafkaLocalSystemTime.class);
-
-	@Override
-	public long milliseconds() {
-		return System.currentTimeMillis();
-	}
-
-	@Override
-	public long nanoseconds() {
-		return System.nanoTime();
-	}
-
-	@Override
-	public void sleep(long ms) {
-		try {
-			Thread.sleep(ms);
-		} catch (InterruptedException e) {
-			LOG.warn("Interruption", e);
-		}
-	}
-
-}
-

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
index c7da5af..fc8678f 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
@@ -20,9 +20,9 @@ package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
-import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import org.apache.flink.util.TestLogger;
 
@@ -33,7 +33,6 @@ import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.PartitionInfo;
 import org.junit.Test;
 import org.junit.runner.RunWith;
-
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.powermock.core.classloader.annotations.PrepareForTest;
@@ -51,17 +50,20 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.powermock.api.mockito.PowerMockito.whenNew;
 
+/**
+ * Tests for the {@link KafkaProducer}.
+ */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(FlinkKafkaProducerBase.class)
 public class KafkaProducerTest extends TestLogger {
-	
+
 	@Test
 	@SuppressWarnings("unchecked")
 	public void testPropagateExceptions() {
 		try {
 			// mock kafka producer
 			KafkaProducer<?, ?> kafkaProducerMock = mock(KafkaProducer.class);
-			
+
 			// partition setup
 			when(kafkaProducerMock.partitionsFor(anyString())).thenReturn(
 				// returning a unmodifiable list to mimic KafkaProducer#partitionsFor() behaviour
@@ -77,14 +79,14 @@ public class KafkaProducerTest extends TestLogger {
 						return null;
 					}
 				});
-			
+
 			// make sure the FlinkKafkaProducer instantiates our mock producer
 			whenNew(KafkaProducer.class).withAnyArguments().thenReturn(kafkaProducerMock);
-			
+
 			// (1) producer that propagates errors
 
 			FlinkKafkaProducer08<String> producerPropagating = new FlinkKafkaProducer08<>(
-					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner)null);
+					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner) null);
 
 			OneInputStreamOperatorTestHarness<String, Object> testHarness =
 					new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producerPropagating));
@@ -107,7 +109,7 @@ public class KafkaProducerTest extends TestLogger {
 			// (2) producer that only logs errors
 
 			FlinkKafkaProducer08<String> producerLogging = new FlinkKafkaProducer08<>(
-					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner)null);
+					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner) null);
 			producerLogging.setLogFailuresOnly(true);
 
 			testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink(producerLogging));

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetention08ITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetention08ITCase.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetention08ITCase.java
index c28799c..091fae3 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetention08ITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetention08ITCase.java
@@ -19,15 +19,18 @@ package org.apache.flink.streaming.connectors.kafka;
 
 import org.junit.Test;
 
+/**
+ * {@link KafkaShortRetentionTestBase} for Kafka 0.8 .
+ */
 @SuppressWarnings("serial")
 public class KafkaShortRetention08ITCase extends KafkaShortRetentionTestBase {
 
-	@Test(timeout=60000)
+	@Test(timeout = 60000)
 	public void testAutoOffsetReset() throws Exception {
 		runAutoOffsetResetTest();
 	}
 
-	@Test(timeout=60000)
+	@Test(timeout = 60000)
 	public void testAutoOffsetResetNone() throws Exception {
 		runFailOnAutoOffsetResetNoneEager();
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
index 2419b53..6b15007 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
@@ -15,15 +15,26 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.operators.StreamSink;
+import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionLeader;
+import org.apache.flink.streaming.connectors.kafka.internals.ZookeeperOffsetHandler;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
+import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
+import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
+import org.apache.flink.util.NetUtils;
+
 import kafka.admin.AdminUtils;
 import kafka.api.PartitionMetadata;
 import kafka.common.KafkaException;
 import kafka.network.SocketServer;
 import kafka.server.KafkaConfig;
 import kafka.server.KafkaServer;
+import kafka.utils.SystemTime$;
 import org.I0Itec.zkclient.ZkClient;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.RetryPolicy;
@@ -31,19 +42,8 @@ import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.curator.test.TestingServer;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.datastream.DataStreamSink;
-import org.apache.flink.streaming.api.operators.StreamSink;
-import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionLeader;
-import org.apache.flink.streaming.connectors.kafka.internals.ZookeeperOffsetHandler;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
-import org.apache.flink.util.NetUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.Seq;
 
 import java.io.File;
 import java.io.IOException;
@@ -55,12 +55,14 @@ import java.util.List;
 import java.util.Properties;
 import java.util.UUID;
 
+import scala.collection.Seq;
+
 import static org.apache.flink.util.NetUtils.hostAndPortToUrlString;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * An implementation of the KafkaServerProvider for Kafka 0.8
+ * An implementation of the KafkaServerProvider for Kafka 0.8 .
  */
 public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
@@ -166,7 +168,6 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		return false;
 	}
 
-
 	@Override
 	public void prepare(int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
 		this.additionalServerProperties = additionalServerProperties;
@@ -325,7 +326,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	}
 
 	/**
-	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed)
+	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 	 */
 	protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
 		LOG.info("Starting broker with id {}", brokerId);
@@ -342,7 +343,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		// for CI stability, increase zookeeper session timeout
 		kafkaProperties.put("zookeeper.session.timeout.ms", "30000");
 		kafkaProperties.put("zookeeper.connection.timeout.ms", "30000");
-		if(additionalServerProperties != null) {
+		if (additionalServerProperties != null) {
 			kafkaProperties.putAll(additionalServerProperties);
 		}
 
@@ -354,7 +355,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);
 
 			try {
-				KafkaServer server = new KafkaServer(kafkaConfig, new KafkaLocalSystemTime());
+				KafkaServer server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$);
 				server.startup();
 				return server;
 			}
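The KafkaServer is now constructed with Kafka's own SystemTime$.MODULE$, which is what makes the KafkaLocalSystemTime helper (deleted earlier in this commit) unnecessary. A sketch of the embedded-broker start-up; the broker properties below are placeholders, not the values the test environment actually uses:

import java.util.Properties;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.SystemTime$;

// Sketch of starting an embedded Kafka 0.8 broker with Kafka's built-in system time.
public class EmbeddedBrokerSketch {

	public static void main(String[] args) {
		Properties kafkaProperties = new Properties();
		kafkaProperties.put("broker.id", "0");
		kafkaProperties.put("log.dir", "/tmp/kafka-logs-sketch");
		kafkaProperties.put("zookeeper.connect", "localhost:2181");
		kafkaProperties.put("port", "9092");

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);
		KafkaServer server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$);
		server.startup();

		// ... run producers / consumers against localhost:9092 ...

		server.shutdown();
		server.awaitShutdown();
	}
}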

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java
index 2df67d9..eb07118 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java
@@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import static java.util.Arrays.asList;
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -36,12 +35,15 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * Tests for the {@link ClosableBlockingQueue}.
+ */
 public class ClosableBlockingQueueTest {
 
 	// ------------------------------------------------------------------------
 	//  single-threaded unit tests
 	// ------------------------------------------------------------------------
-	
+
 	@Test
 	public void testCreateQueueHashCodeEquals() {
 		try {
@@ -54,14 +56,14 @@ public class ClosableBlockingQueueTest {
 			assertTrue(queue2.isEmpty());
 			assertEquals(0, queue1.size());
 			assertEquals(0, queue2.size());
-			
+
 			assertTrue(queue1.hashCode() == queue2.hashCode());
 			//noinspection EqualsWithItself
 			assertTrue(queue1.equals(queue1));
 			//noinspection EqualsWithItself
 			assertTrue(queue2.equals(queue2));
 			assertTrue(queue1.equals(queue2));
-			
+
 			assertNotNull(queue1.toString());
 			assertNotNull(queue2.toString());
 
@@ -86,7 +88,7 @@ public class ClosableBlockingQueueTest {
 			//noinspection EqualsWithItself
 			assertTrue(queue4.equals(queue4));
 			assertTrue(queue3.equals(queue4));
-			
+
 			assertNotNull(queue3.toString());
 			assertNotNull(queue4.toString());
 		}
@@ -95,7 +97,7 @@ public class ClosableBlockingQueueTest {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testCloseEmptyQueue() {
 		try {
@@ -103,10 +105,10 @@ public class ClosableBlockingQueueTest {
 			assertTrue(queue.isOpen());
 			assertTrue(queue.close());
 			assertFalse(queue.isOpen());
-			
+
 			assertFalse(queue.addIfOpen("element"));
 			assertTrue(queue.isEmpty());
-			
+
 			try {
 				queue.add("some element");
 				fail("should cause an exception");
@@ -125,15 +127,15 @@ public class ClosableBlockingQueueTest {
 		try {
 			ClosableBlockingQueue<Integer> queue = new ClosableBlockingQueue<>(asList(1, 2, 3));
 			assertTrue(queue.isOpen());
-			
+
 			assertFalse(queue.close());
 			assertFalse(queue.close());
-			
+
 			queue.poll();
 
 			assertFalse(queue.close());
 			assertFalse(queue.close());
-			
+
 			queue.pollBatch();
 
 			assertTrue(queue.close());
@@ -154,36 +156,36 @@ public class ClosableBlockingQueueTest {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testPeekAndPoll() {
 		try {
 			ClosableBlockingQueue<String> queue = new ClosableBlockingQueue<>();
-			
+
 			assertNull(queue.peek());
 			assertNull(queue.peek());
 			assertNull(queue.poll());
 			assertNull(queue.poll());
-			
+
 			assertEquals(0, queue.size());
-			
+
 			queue.add("a");
 			queue.add("b");
 			queue.add("c");
 
 			assertEquals(3, queue.size());
-			
+
 			assertEquals("a", queue.peek());
 			assertEquals("a", queue.peek());
 			assertEquals("a", queue.peek());
 
 			assertEquals(3, queue.size());
-			
+
 			assertEquals("a", queue.poll());
 			assertEquals("b", queue.poll());
 
 			assertEquals(1, queue.size());
-			
+
 			assertEquals("c", queue.peek());
 			assertEquals("c", queue.peek());
 
@@ -193,9 +195,9 @@ public class ClosableBlockingQueueTest {
 			assertNull(queue.poll());
 			assertNull(queue.peek());
 			assertNull(queue.peek());
-			
+
 			assertTrue(queue.close());
-			
+
 			try {
 				queue.peek();
 				fail("should cause an exception");
@@ -222,13 +224,13 @@ public class ClosableBlockingQueueTest {
 			ClosableBlockingQueue<String> queue = new ClosableBlockingQueue<>();
 
 			assertNull(queue.pollBatch());
-			
+
 			queue.add("a");
 			queue.add("b");
-			
+
 			assertEquals(asList("a", "b"), queue.pollBatch());
 			assertNull(queue.pollBatch());
-			
+
 			queue.add("c");
 
 			assertEquals(singletonList("c"), queue.pollBatch());
@@ -363,16 +365,16 @@ public class ClosableBlockingQueueTest {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  multi-threaded tests
 	// ------------------------------------------------------------------------
-	
+
 	@Test
 	public void notifyOnClose() {
 		try {
 			final long oneYear = 365L * 24 * 60 * 60 * 1000;
-			
+
 			// test "getBatchBlocking()"
 			final ClosableBlockingQueue<String> queue1 = new ClosableBlockingQueue<>();
 			QueueCall call1 = new QueueCall() {
@@ -418,7 +420,7 @@ public class ClosableBlockingQueueTest {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
 	@Test
 	public void testMultiThreadedAddGet() {
@@ -426,9 +428,9 @@ public class ClosableBlockingQueueTest {
 			final ClosableBlockingQueue<Integer> queue = new ClosableBlockingQueue<>();
 			final AtomicReference<Throwable> pushErrorRef = new AtomicReference<>();
 			final AtomicReference<Throwable> pollErrorRef = new AtomicReference<>();
-			
+
 			final int numElements = 2000;
-			
+
 			Thread pusher = new Thread("pusher") {
 
 				@Override
@@ -437,14 +439,14 @@ public class ClosableBlockingQueueTest {
 						final Random rnd = new Random();
 						for (int i = 0; i < numElements; i++) {
 							queue.add(i);
-							
+
 							// sleep a bit, sometimes
 							int sleepTime = rnd.nextInt(3);
 							if (sleepTime > 1) {
 								Thread.sleep(sleepTime);
 							}
 						}
-						
+
 						while (true) {
 							if (queue.close()) {
 								break;
@@ -466,11 +468,11 @@ public class ClosableBlockingQueueTest {
 				public void run() {
 					try {
 						int count = 0;
-						
+
 						try {
 							final Random rnd = new Random();
 							int nextExpected = 0;
-							
+
 							while (true) {
 								int getMethod = count % 7;
 								switch (getMethod) {
@@ -534,7 +536,7 @@ public class ClosableBlockingQueueTest {
 										count++;
 									}
 								}
-								
+
 								// sleep a bit, sometimes
 								int sleepTime = rnd.nextInt(3);
 								if (sleepTime > 1) {
@@ -551,10 +553,10 @@ public class ClosableBlockingQueueTest {
 				}
 			};
 			poller.start();
-			
+
 			pusher.join();
 			poller.join();
-			
+
 			if (pushErrorRef.get() != null) {
 				Throwable t = pushErrorRef.get();
 				t.printStackTrace();
@@ -571,16 +573,16 @@ public class ClosableBlockingQueueTest {
 			fail(e.getMessage());
 		}
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Utils
 	// ------------------------------------------------------------------------
-	
+
 	private static void testCallExitsOnClose(
 			final QueueCall call, ClosableBlockingQueue<String> queue) throws Exception {
-		
+
 		final AtomicReference<Throwable> errorRef = new AtomicReference<>();
-		
+
 		Runnable runnable = new Runnable() {
 			@Override
 			public void run() {
@@ -602,7 +604,7 @@ public class ClosableBlockingQueueTest {
 		Throwable cause = errorRef.get();
 		assertTrue(cause instanceof IllegalStateException);
 	}
-	
+
 	private interface QueueCall {
 		void call() throws Exception;
 	}
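The tests above pin down the queue's closing semantics: close() succeeds only once the queue is empty, addIfOpen() returns false afterwards, and add() is expected to fail on a closed queue. A short usage sketch of those semantics:

import org.apache.flink.streaming.connectors.kafka.internals.ClosableBlockingQueue;

// Usage sketch of the queue behaviour exercised by the tests in this file.
public class ClosableQueueUsageSketch {

	public static void main(String[] args) {
		ClosableBlockingQueue<String> queue = new ClosableBlockingQueue<>();

		queue.add("a");
		System.out.println(queue.close());        // false: the queue still holds "a"

		System.out.println(queue.poll());         // "a"
		System.out.println(queue.close());        // true: an empty queue can be closed

		System.out.println(queue.addIfOpen("b")); // false: the element is not added

		try {
			queue.add("c");
		} catch (IllegalStateException e) {
			System.out.println("add() rejected, queue is closed");
		}
	}
}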

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/pom.xml b/flink-connectors/flink-connector-kafka-0.9/pom.xml
index 0140353..248f7e1 100644
--- a/flink-connectors/flink-connector-kafka-0.9/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.9/pom.xml
@@ -118,7 +118,7 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
-		
+
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-tests_${scala.binary.version}</artifactId>
@@ -208,5 +208,5 @@ under the License.
 			</plugin>
 		</plugins>
 	</build>
-	
+
 </project>

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
index e638348..a1d8967 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
@@ -34,7 +34,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.serialization.ByteArrayDeserializer;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,10 +48,10 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
  * Apache Kafka 0.9.x. The consumer can run in multiple parallel instances, each of which will pull
- * data from one or more Kafka partitions. 
- * 
+ * data from one or more Kafka partitions.
+ *
  * <p>The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
- * during a failure, and that the computation processes elements "exactly once". 
+ * during a failure, and that the computation processes elements "exactly once".
  * (Note: These guarantees naturally assume that Kafka itself does not loose any data.)</p>
  *
  * <p>Please note that Flink snapshots the offsets internally as part of its distributed checkpoints. The offsets
@@ -73,17 +72,16 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 
 	private static final Logger LOG = LoggerFactory.getLogger(FlinkKafkaConsumer09.class);
 
-	/**  Configuration key to change the polling timeout **/
+	/**  Configuration key to change the polling timeout. **/
 	public static final String KEY_POLL_TIMEOUT = "flink.poll-timeout";
 
-
 	/** From Kafka's Javadoc: The time, in milliseconds, spent waiting in poll if data is not
 	 * available. If 0, returns immediately with any records that are available now. */
 	public static final long DEFAULT_POLL_TIMEOUT = 100L;
 
 	// ------------------------------------------------------------------------
 
-	/** User-supplied properties for Kafka **/
+	/** User-supplied properties for Kafka. **/
 	protected final Properties properties;
 
 	/** From Kafka's Javadoc: The time, in milliseconds, spent waiting in poll if data is not
@@ -93,7 +91,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 	// ------------------------------------------------------------------------
 
 	/**
-	 * Creates a new Kafka streaming source consumer for Kafka 0.9.x
+	 * Creates a new Kafka streaming source consumer for Kafka 0.9.x .
 	 *
 	 * @param topic
 	 *           The name of the topic that should be consumed.
@@ -109,7 +107,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.9.x
 	 *
-	 * This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
+	 * <p>This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
 	 * pairs, offsets, and topic names from Kafka.
 	 *
 	 * @param topic
@@ -126,7 +124,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.9.x
 	 *
-	 * This constructor allows passing multiple topics to the consumer.
+	 * <p>This constructor allows passing multiple topics to the consumer.
 	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
@@ -142,7 +140,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.9.x
 	 *
-	 * This constructor allows passing multiple topics and a key/value deserialization schema.
+	 * <p>This constructor allows passing multiple topics and a key/value deserialization schema.
 	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
@@ -216,7 +214,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 				if (partitionsForTopic != null) {
 					partitions.addAll(convertToFlinkKafkaTopicPartition(partitionsForTopic));
 				}
-				else{
+				else {
 					LOG.info("Unable to retrieve any partitions for the requested topic: {}", topic);
 				}
 			}
@@ -243,12 +241,12 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 	}
 
 	// ------------------------------------------------------------------------
-	//  Utilities 
+	//  Utilities
 	// ------------------------------------------------------------------------
 
 	/**
-	 * Converts a list of Kafka PartitionInfo's to Flink's KafkaTopicPartition (which are serializable)
-	 * 
+	 * Converts a list of Kafka PartitionInfo's to Flink's KafkaTopicPartition (which are serializable).
+	 *
 	 * @param partitions A list of Kafka PartitionInfos.
 	 * @return A list of KafkaTopicPartitions
 	 */
@@ -264,7 +262,7 @@ public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 
 	/**
 	 * Makes sure that the ByteArrayDeserializer is registered in the Kafka properties.
-	 * 
+	 *
 	 * @param props The Kafka properties to register the serializer in.
 	 */
 	private static void setDeserializer(Properties props) {

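For context, a minimal usage sketch (not part of the commit) of the consumer whose Javadoc is touched above: the broker address, group id, and topic name are placeholders, while the constructor shape and the KEY_POLL_TIMEOUT key ("flink.poll-timeout") come from the class shown in the diff.

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class Kafka09ConsumerSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker
		props.setProperty("group.id", "consumer-sketch");         // placeholder group id
		// raise the default 100 ms poll timeout via the key documented above
		props.setProperty(FlinkKafkaConsumer09.KEY_POLL_TIMEOUT, "250");

		DataStream<String> stream = env.addSource(
				new FlinkKafkaConsumer09<>("my-topic", new SimpleStringSchema(), props));

		stream.print();
		env.execute("Kafka 0.9 consumer sketch");
	}
}
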
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
index cbed361..6b9768e 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
@@ -27,11 +27,10 @@ import org.apache.flink.streaming.util.serialization.SerializationSchema;
 
 import java.util.Properties;
 
-
 /**
  * Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.9.
  *
- * Please note that this producer does not have any reliability guarantees.
+ * <p>Please note that this producer does not have any reliability guarantees.
  *
  * @param <IN> Type of the messages to write into Kafka.
  */

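A matching sketch (not part of the commit) for the producer side: the (topic, serialization schema, properties) constructor is assumed from the released connector API rather than from this hunk, and the broker address and topic name are placeholders. As the Javadoc above notes, this sink offers no reliability guarantees.

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class Kafka09ProducerSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		DataStream<String> stream = env.fromElements("one", "two", "three");

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker

		// fire-and-forget write to Kafka; no delivery guarantees, as stated above
		stream.addSink(new FlinkKafkaProducer09<>("my-topic", new SimpleStringSchema(), props));

		env.execute("Kafka 0.9 producer sketch");
	}
}
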
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
index 9e1172b..d69187e 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
@@ -18,13 +18,15 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
-import org.apache.avro.specific.SpecificRecord;
-import org.apache.avro.specific.SpecificRecordBase;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
 
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.avro.specific.SpecificRecordBase;
+
+import java.util.Properties;
+
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.9.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
index a81422e..b2227cd 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 
@@ -29,9 +30,9 @@ import java.util.Properties;
  * Kafka 0.9 {@link KafkaTableSink} that serializes data in JSON format.
  */
 public class Kafka09JsonTableSink extends KafkaJsonTableSink {
-	
+
 	/**
-	 * Creates {@link KafkaTableSink} for Kafka 0.9
+	 * Creates {@link KafkaTableSink} for Kafka 0.9 .
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
@@ -42,7 +43,7 @@ public class Kafka09JsonTableSink extends KafkaJsonTableSink {
 	}
 
 	/**
-	 * Creates {@link KafkaTableSink} for Kafka 0.9
+	 * Creates {@link KafkaTableSink} for Kafka 0.9 .
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
index 26fffa5..80811b2 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
index c581332..bc50a4c 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
index e6e3c51..0897f53 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
@@ -19,10 +19,12 @@
 package org.apache.flink.streaming.connectors.kafka.internal;
 
 import org.apache.flink.util.ExceptionUtils;
+
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 
 import javax.annotation.Nonnull;
 import javax.annotation.concurrent.ThreadSafe;
+
 import java.io.Closeable;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
@@ -32,13 +34,13 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * <i>producer</i> thread to a <i>consumer</i> thread. It effectively behaves like a
  * "size one blocking queue", with some extras around exception reporting, closing, and
  * waking up thread without {@link Thread#interrupt() interrupting} threads.
- * 
+ *
  * <p>This class is used in the Flink Kafka Consumer to hand over data and exceptions between
  * the thread that runs the KafkaConsumer class and the main thread.
- * 
+ *
  * <p>The Handover has the notion of "waking up" the producer thread with a {@link WakeupException}
  * rather than a thread interrupt.
- * 
+ *
  * <p>The Handover can also be "closed", signalling from one thread to the other that it
  * the thread has terminated.
  */
@@ -54,12 +56,12 @@ public final class Handover implements Closeable {
 	/**
 	 * Polls the next element from the Handover, possibly blocking until the next element is
 	 * available. This method behaves similar to polling from a blocking queue.
-	 * 
+	 *
 	 * <p>If an exception was handed in by the producer ({@link #reportError(Throwable)}), then
 	 * that exception is thrown rather than an element being returned.
-	 * 
+	 *
 	 * @return The next element (buffer of records, never null).
-	 * 
+	 *
 	 * @throws ClosedException Thrown if the Handover was {@link #close() closed}.
 	 * @throws Exception Rethrows exceptions from the {@link #reportError(Throwable)} method.
 	 */
@@ -81,7 +83,7 @@ public final class Handover implements Closeable {
 
 				// this statement cannot be reached since the above method always throws an exception
 				// this is only here to silence the compiler and any warnings
-				return ConsumerRecords.empty(); 
+				return ConsumerRecords.empty();
 			}
 		}
 	}
@@ -90,11 +92,11 @@ public final class Handover implements Closeable {
 	 * Hands over an element from the producer. If the Handover already has an element that was
 	 * not yet picked up by the consumer thread, this call blocks until the consumer picks up that
 	 * previous element.
-	 * 
+	 *
 	 * <p>This behavior is similar to a "size one" blocking queue.
-	 * 
+	 *
 	 * @param element The next element to hand over.
-	 * 
+	 *
 	 * @throws InterruptedException
 	 *                 Thrown, if the thread is interrupted while blocking for the Handover to be empty.
 	 * @throws WakeupException
@@ -135,15 +137,15 @@ public final class Handover implements Closeable {
 	 * Reports an exception. The consumer will throw the given exception immediately, if
 	 * it is currently blocked in the {@link #pollNext()} method, or the next time it
 	 * calls that method.
-	 * 
+	 *
 	 * <p>After this method has been called, no call to either {@link #produce(ConsumerRecords)}
 	 * or {@link #pollNext()} will ever return regularly any more, but will always return
 	 * exceptionally.
-	 * 
+	 *
 	 * <p>If another exception was already reported, this method does nothing.
-	 * 
+	 *
 	 * <p>For the producer, the Handover will appear as if it was {@link #close() closed}.
-	 * 
+	 *
 	 * @param t The exception to report.
 	 */
 	public void reportError(Throwable t) {
@@ -163,7 +165,7 @@ public final class Handover implements Closeable {
 	 * Closes the handover. Both the {@link #produce(ConsumerRecords)} method and the
 	 * {@link #pollNext()} will throw a {@link ClosedException} on any currently blocking and
 	 * future invocations.
-	 * 
+	 *
 	 * <p>If an exception was previously reported via the {@link #reportError(Throwable)} method,
 	 * that exception will not be overridden. The consumer thread will throw that exception upon
 	 * calling {@link #pollNext()}, rather than the {@code ClosedException}.

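The Javadoc changes above spell out the Handover contract: a "size one blocking queue" with error reporting, closing, and producer wake-up. The class below is a simplified, self-contained re-implementation of that pattern for illustration only; it is not the Flink class, it uses a generic element type instead of ConsumerRecords, and it omits the wake-up mechanism.

// Illustration only: a stripped-down variant of the hand-over pattern described above,
// not the org.apache.flink.streaming.connectors.kafka.internal.Handover class itself.
final class MiniHandover<T> {

	private final Object lock = new Object();

	private T next;              // the single pending element ("size one" buffer)
	private Throwable error;     // error reported by the producer thread
	private boolean closed;

	/** Hands one element to the consumer; blocks while the previous element is still pending. */
	void produce(T element) throws InterruptedException {
		synchronized (lock) {
			while (next != null && error == null && !closed) {
				lock.wait();
			}
			if (error != null || closed) {
				// an error or a close makes the handover appear closed to the producer
				throw new IllegalStateException("handover is closed");
			}
			next = element;
			lock.notifyAll();
		}
	}

	/** Returns the next element; rethrows a reported error, or fails once the handover is closed. */
	T pollNext() throws Exception {
		synchronized (lock) {
			while (next == null && error == null && !closed) {
				lock.wait();
			}
			if (error != null) {
				throw new Exception("producer thread failed", error);
			}
			if (next == null) {
				throw new IllegalStateException("handover is closed");
			}
			T result = next;
			next = null;
			lock.notifyAll();
			return result;
		}
	}

	/** Makes the consumer rethrow the given error on its next pollNext() call. */
	void reportError(Throwable t) {
		synchronized (lock) {
			if (error == null) {
				error = t;
			}
			lock.notifyAll();
		}
	}

	/** Unblocks both threads; subsequent produce()/pollNext() calls fail. */
	void close() {
		synchronized (lock) {
			closed = true;
			next = null;
			lock.notifyAll();
		}
	}
}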

[07/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-elasticsearch*

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-elasticsearch*


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c20b396f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c20b396f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c20b396f

Branch: refs/heads/master
Commit: c20b396f5ea09a54f52e980c70e30888a7a2859c
Parents: 88189f2
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:20:29 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:12 2017 +0200

----------------------------------------------------------------------
 .../ActionRequestFailureHandler.java            |  9 +++----
 .../ElasticsearchApiCallBridge.java             |  3 ++-
 .../elasticsearch/ElasticsearchSinkBase.java    | 28 +++++++++++++-------
 .../ElasticsearchSinkFunction.java              |  7 +++--
 .../elasticsearch/util/NoOpFailureHandler.java  |  1 +
 .../RetryRejectedExecutionFailureHandler.java   |  1 +
 .../ElasticsearchSinkBaseTest.java              | 23 ++++++++--------
 .../ElasticsearchSinkTestBase.java              | 13 ++++-----
 .../EmbeddedElasticsearchNodeEnvironment.java   |  2 +-
 .../testutils/SourceSinkDataTestKit.java        |  1 +
 .../flink-connector-elasticsearch/pom.xml       |  2 +-
 .../Elasticsearch1ApiCallBridge.java            |  2 ++
 .../elasticsearch/ElasticsearchSink.java        | 16 +++++------
 .../elasticsearch/IndexRequestBuilder.java      |  8 +++---
 .../elasticsearch/ElasticsearchSinkITCase.java  |  7 ++++-
 .../examples/ElasticsearchSinkExample.java      |  4 +--
 .../flink-connector-elasticsearch2/pom.xml      |  2 +-
 .../Elasticsearch2ApiCallBridge.java            |  4 ++-
 .../elasticsearch2/ElasticsearchSink.java       | 14 +++++-----
 .../ElasticsearchSinkFunction.java              |  7 +++--
 .../elasticsearch2/RequestIndexer.java          |  1 +
 ...mbeddedElasticsearchNodeEnvironmentImpl.java |  1 +
 .../elasticsearch2/ElasticsearchSinkITCase.java |  5 ++++
 .../examples/ElasticsearchSinkExample.java      |  2 ++
 .../Elasticsearch5ApiCallBridge.java            |  4 ++-
 .../elasticsearch5/ElasticsearchSink.java       | 14 +++++-----
 ...mbeddedElasticsearchNodeEnvironmentImpl.java |  1 +
 .../elasticsearch5/ElasticsearchSinkITCase.java |  5 ++++
 .../examples/ElasticsearchSinkExample.java      |  2 ++
 29 files changed, 110 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
index abbdd72..3ca1417 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
@@ -26,8 +26,7 @@ import java.io.Serializable;
  * {@link ActionRequest ActionRequests} should be handled, e.g. dropping them, reprocessing malformed documents, or
  * simply requesting them to be sent to Elasticsearch again if the failure is only temporary.
  *
- * <p>
- * Example:
+ * <p>Example:
  *
  * <pre>{@code
  *
@@ -50,12 +49,10 @@ import java.io.Serializable;
  *
  * }</pre>
  *
- * <p>
- * The above example will let the sink re-add requests that failed due to queue capacity saturation and drop requests
+ * <p>The above example will let the sink re-add requests that failed due to queue capacity saturation and drop requests
  * with malformed documents, without failing the sink. For all other failures, the sink will fail.
  *
- * <p>
- * Note: For Elasticsearch 1.x, it is not feasible to match the type of the failure because the exact type
+ * <p>Note: For Elasticsearch 1.x, it is not feasible to match the type of the failure because the exact type
  * could not be retrieved through the older version Java client APIs (thus, the types will be general {@link Exception}s
  * and only differ in the failure message). In this case, it is recommended to match on the provided REST status code.
  */

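The Javadoc above explains how failed requests can be re-added, dropped, or rethrown. Below is a minimal handler sketch along those lines; the onFailure parameter list follows the {@link} reference in the ElasticsearchSinkBase diff, the public modifier and the throws clause are assumed, and a production handler would typically also walk the cause chain and consult the REST status code.

import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

// Sketch only: re-add requests rejected by a saturated bulk queue, fail the sink otherwise.
public class RetryOnRejectionFailureHandler implements ActionRequestFailureHandler {

	private static final long serialVersionUID = 1L;

	@Override
	public void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
		if (failure instanceof EsRejectedExecutionException) {
			// temporary queue saturation: ask the sink to send the request again
			indexer.add(action);
		} else {
			// everything else fails the sink (a real handler would also inspect nested causes)
			throw failure;
		}
	}
}
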
http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index b482432..ce98dfb 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -23,6 +23,7 @@ import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.client.Client;
 
 import javax.annotation.Nullable;
+
 import java.io.Serializable;
 import java.util.Map;
 
@@ -31,7 +32,7 @@ import java.util.Map;
  * This includes calls to create Elasticsearch clients, handle failed item responses, etc. Any incompatible Elasticsearch
  * Java APIs should be bridged using this interface.
  *
- * Implementations are allowed to be stateful. For example, for Elasticsearch 1.x, since connecting via an embedded node
+ * <p>Implementations are allowed to be stateful. For example, for Elasticsearch 1.x, since connecting via an embedded node
  * is allowed, the call bridge will hold reference to the created embedded node. Each instance of the sink will hold
  * exactly one instance of the call bridge, and state cleanup is performed when the sink is closed.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index f6944b3..2ab5a90 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -25,6 +25,7 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
 import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
@@ -49,14 +50,12 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * Base class for all Flink Elasticsearch Sinks.
  *
- * <p>
- * This class implements the common behaviour across Elasticsearch versions, such as
+ * <p>This class implements the common behaviour across Elasticsearch versions, such as
  * the use of an internal {@link BulkProcessor} to buffer multiple {@link ActionRequest}s before
  * sending the requests to the cluster, as well as passing input records to the user provided
  * {@link ElasticsearchSinkFunction} for processing.
  *
- * <p>
- * The version specific API calls for different Elasticsearch versions should be defined by a concrete implementation of
+ * <p>The version specific API calls for different Elasticsearch versions should be defined by a concrete implementation of
  * a {@link ElasticsearchApiCallBridge}, which is provided to the constructor of this class. This call bridge is used,
  * for example, to create a Elasticsearch {@link Client}, handle failed item responses, etc.
  *
@@ -80,11 +79,21 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES = "bulk.flush.backoff.retries";
 	public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY = "bulk.flush.backoff.delay";
 
+	/**
+	 * Used to control whether the retry delay should increase exponentially or remain constant.
+	 */
 	public enum FlushBackoffType {
 		CONSTANT,
 		EXPONENTIAL
 	}
 
+	/**
+	 * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints
+	 * (i.e. the client's internal thread pool is full), the backoff policy decides how long the bulk processor will
+	 * wait before the operation is retried internally.
+	 *
+	 * <p>This is a proxy for version specific backoff policies.
+	 */
 	public class BulkFlushBackoffPolicy implements Serializable {
 
 		private static final long serialVersionUID = -6022851996101826049L;
@@ -149,14 +158,14 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	//  Internals for the Flink Elasticsearch Sink
 	// ------------------------------------------------------------------------
 
-	/** Call bridge for different version-specfic */
+	/** Call bridge for different version-specific. */
 	private final ElasticsearchApiCallBridge callBridge;
 
 	/**
 	 * Number of pending action requests not yet acknowledged by Elasticsearch.
 	 * This value is maintained only if {@link ElasticsearchSinkBase#flushOnCheckpoint} is {@code true}.
 	 *
-	 * This is incremented whenever the user adds (or re-adds through the {@link ActionRequestFailureHandler}) requests
+	 * <p>This is incremented whenever the user adds (or re-adds through the {@link ActionRequestFailureHandler}) requests
 	 * to the {@link RequestIndexer}. It is decremented for each completed request of a bulk request, in
 	 * {@link BulkProcessor.Listener#afterBulk(long, BulkRequest, BulkResponse)} and
 	 * {@link BulkProcessor.Listener#afterBulk(long, BulkRequest, Throwable)}.
@@ -174,7 +183,7 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	 * the user considered it should fail the sink via the
 	 * {@link ActionRequestFailureHandler#onFailure(ActionRequest, Throwable, int, RequestIndexer)} method.
 	 *
-	 * Errors will be checked and rethrown before processing each input element, and when the sink is closed.
+	 * <p>Errors will be checked and rethrown before processing each input element, and when the sink is closed.
 	 */
 	private final AtomicReference<Throwable> failureThrowable = new AtomicReference<>();
 
@@ -260,7 +269,7 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	 * Disable flushing on checkpoint. When disabled, the sink will not wait for all
 	 * pending action requests to be acknowledged by Elasticsearch on checkpoints.
 	 *
-	 * NOTE: If flushing on checkpoint is disabled, the Flink Elasticsearch Sink does NOT
+	 * <p>NOTE: If flushing on checkpoint is disabled, the Flink Elasticsearch Sink does NOT
 	 * provide any strong guarantees for at-least-once delivery of action requests.
 	 */
 	public void disableFlushOnCheckpoint() {
@@ -320,8 +329,9 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	/**
 	 * Build the {@link BulkProcessor}.
 	 *
-	 * Note: this is exposed for testing purposes.
+	 * <p>Note: this is exposed for testing purposes.
 	 */
+	@VisibleForTesting
 	protected BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
 		checkNotNull(listener);
 

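A sketch of how the flush/backoff behaviour described above is driven by the user-config map handed to the concrete sinks. The two backoff constants are visible in the diff; the "bulk.flush.backoff.enable" and "bulk.flush.backoff.type" key names are recalled from the connector documentation and should be treated as assumptions.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;

// Sketch only: builds a user-config map enabling an exponential bulk-flush backoff.
public class BulkFlushConfigSketch {

	public static Map<String, String> bulkFlushBackoffConfig() {
		Map<String, String> userConfig = new HashMap<>();
		userConfig.put("bulk.flush.backoff.enable", "true");      // key name assumed from the docs
		userConfig.put("bulk.flush.backoff.type", "EXPONENTIAL"); // FlushBackoffType from the diff; key name assumed
		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, "3");
		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, "100"); // milliseconds
		return userConfig;
	}
}
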
http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
index 1e20a0a..8248204 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
@@ -20,6 +20,7 @@ package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.api.common.functions.Function;
 import org.apache.flink.api.common.functions.RuntimeContext;
+
 import org.elasticsearch.action.ActionRequest;
 
 import java.io.Serializable;
@@ -27,11 +28,9 @@ import java.io.Serializable;
 /**
  * Creates multiple {@link ActionRequest ActionRequests} from an element in a stream.
  *
- * <p>
- * This is used by sinks to prepare elements for sending them to Elasticsearch.
+ * <p>This is used by sinks to prepare elements for sending them to Elasticsearch.
  *
- * <p>
- * Example:
+ * <p>Example:
  *
  * <pre>{@code
  *					private static class TestElasticSearchSinkFunction implements

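In the spirit of the (truncated) Javadoc example above, a small sink function that turns each incoming string into one IndexRequest; the index and type names are placeholders.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.HashMap;
import java.util.Map;

// Sketch only: one IndexRequest per element, indexed under placeholder index/type names.
public class SimpleStringSinkFunction implements ElasticsearchSinkFunction<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
		Map<String, Object> json = new HashMap<>();
		json.put("data", element);

		IndexRequest request = Requests.indexRequest()
				.index("my-index")   // placeholder index name
				.type("my-type")     // placeholder type name
				.source(json);

		indexer.add(request);
	}
}
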
http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
index b19ea08..dffee20 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
@@ -19,6 +19,7 @@ package org.apache.flink.streaming.connectors.elasticsearch.util;
 
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+
 import org.elasticsearch.action.ActionRequest;
 
 /**

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
index fabdcbc..9380959 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
@@ -21,6 +21,7 @@ package org.apache.flink.streaming.connectors.elasticsearch.util;
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 import org.apache.flink.util.ExceptionUtils;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
index b9df5c6..5e59785 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
@@ -24,6 +24,7 @@ import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.bulk.BulkItemResponse;
@@ -41,16 +42,16 @@ import javax.annotation.Nullable;
 
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Map;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Suite of tests for {@link ElasticsearchSinkBase}.
@@ -123,9 +124,9 @@ public class ElasticsearchSinkBaseTest {
 
 	/**
 	 * Tests that any item failure in the listener callbacks due to flushing on an immediately following checkpoint
-	 * is rethrown; we set a timeout because the test will not finish if the logic is broken
+	 * is rethrown; we set a timeout because the test will not finish if the logic is broken.
 	 */
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
 		final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
 			new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());
@@ -250,7 +251,7 @@ public class ElasticsearchSinkBaseTest {
 	 * Tests that any bulk failure in the listener callbacks due to flushing on an immediately following checkpoint
 	 * is rethrown; we set a timeout because the test will not finish if the logic is broken.
 	 */
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
 		final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
 			new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());
@@ -307,9 +308,9 @@ public class ElasticsearchSinkBaseTest {
 
 	/**
 	 * Tests that the sink correctly waits for pending requests (including re-added requests) on checkpoints;
-	 * we set a timeout because the test will not finish if the logic is broken
+	 * we set a timeout because the test will not finish if the logic is broken.
 	 */
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testAtLeastOnceSink() throws Throwable {
 		final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
 				new HashMap<String, String>(),
@@ -365,9 +366,9 @@ public class ElasticsearchSinkBaseTest {
 	/**
 	 * This test is meant to assure that testAtLeastOnceSink is valid by testing that if flushing is disabled,
 	 * the snapshot method does indeed finishes without waiting for pending requests;
-	 * we set a timeout because the test will not finish if the logic is broken
+	 * we set a timeout because the test will not finish if the logic is broken.
 	 */
-	@Test(timeout=5000)
+	@Test(timeout = 5000)
 	public void testDoesNotWaitForPendingRequestsIfFlushingDisabled() throws Exception {
 		final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
 			new HashMap<String, String>(), new SimpleSinkFunction<String>(), new DummyRetryFailureHandler());
@@ -409,7 +410,7 @@ public class ElasticsearchSinkBaseTest {
 
 		/**
 		 * This method is used to mimic a scheduled bulk request; we need to do this
-		 * manually because we are mocking the BulkProcessor
+		 * manually because we are mocking the BulkProcessor.
 		 */
 		public void manualBulkRequestWithAllPendingRequests() {
 			flushLatch.trigger(); // let the flush
@@ -429,7 +430,7 @@ public class ElasticsearchSinkBaseTest {
 		 * Set the list of mock failures to use for the next bulk of item responses. A {@code null}
 		 * means that the response is successful, failed otherwise.
 		 *
-		 * The list is used with corresponding order to the requests in the bulk, i.e. the first
+		 * <p>The list is used with corresponding order to the requests in the bulk, i.e. the first
 		 * request uses the response at index 0, the second requests uses the response at index 1, etc.
 		 */
 		public void setMockItemFailuresListForNextBulkItemResponses(List<? extends Throwable> mockItemFailuresList) {

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
index 2f9e4c1..297bc5d 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
@@ -24,6 +24,7 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.elasticsearch.testutils.SourceSinkDataTestKit;
 import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
 import org.junit.AfterClass;
@@ -45,7 +46,7 @@ import static org.junit.Assert.fail;
  */
 public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgramsTestBase {
 
-	protected final static String CLUSTER_NAME = "test-cluster";
+	protected static final String CLUSTER_NAME = "test-cluster";
 
 	protected static EmbeddedElasticsearchNodeEnvironment embeddedNodeEnv;
 
@@ -116,7 +117,7 @@ public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgram
 
 		try {
 			createElasticsearchSink(userConfig, null, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
-		} catch(IllegalArgumentException expectedException) {
+		} catch (IllegalArgumentException expectedException) {
 			// test passes
 			return;
 		}
@@ -137,7 +138,7 @@ public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgram
 				userConfig,
 				Collections.<InetSocketAddress>emptyList(),
 				new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
-		} catch(IllegalArgumentException expectedException) {
+		} catch (IllegalArgumentException expectedException) {
 			// test passes
 			return;
 		}
@@ -162,7 +163,7 @@ public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgram
 
 		try {
 			env.execute("Elasticsearch Transport Client Test");
-		} catch(JobExecutionException expectedException) {
+		} catch (JobExecutionException expectedException) {
 			assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
 			return;
 		}
@@ -170,7 +171,7 @@ public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgram
 		fail();
 	}
 
-	/** Creates a version-specific Elasticsearch sink, using arbitrary transport addresses */
+	/** Creates a version-specific Elasticsearch sink, using arbitrary transport addresses. */
 	protected abstract <T> ElasticsearchSinkBase<T> createElasticsearchSink(Map<String, String> userConfig,
 																			List<InetSocketAddress> transportAddresses,
 																			ElasticsearchSinkFunction<T> elasticsearchSinkFunction);
@@ -178,7 +179,7 @@ public abstract class ElasticsearchSinkTestBase extends StreamingMultipleProgram
 	/**
 	 * Creates a version-specific Elasticsearch sink to connect to a local embedded Elasticsearch node.
 	 *
-	 * This case is singled out from {@link ElasticsearchSinkTestBase#createElasticsearchSink(Map, List, ElasticsearchSinkFunction)}
+	 * <p>This case is singled out from {@link ElasticsearchSinkTestBase#createElasticsearchSink(Map, List, ElasticsearchSinkFunction)}
 	 * because the Elasticsearch Java API to do so is incompatible across different versions.
 	 */
 	protected abstract <T> ElasticsearchSinkBase<T> createElasticsearchSinkForEmbeddedNode(

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
index f59eb03..ea6e7a3 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
@@ -24,7 +24,7 @@ import java.io.File;
 /**
  * The {@link EmbeddedElasticsearchNodeEnvironment} is used in integration tests to manage Elasticsearch embedded nodes.
  *
- * NOTE: In order for {@link ElasticsearchSinkTestBase} to dynamically load version-specific implementations
+ * <p>NOTE: In order for {@link ElasticsearchSinkTestBase} to dynamically load version-specific implementations
  *       for the tests, concrete implementations must be named {@code EmbeddedElasticsearchNodeEnvironmentImpl}. It must
  *       also be located under the same package. The intentional package-private accessibility of this interface
  *       enforces that.

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
index 55a48fa..4e3d3e2 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
@@ -22,6 +22,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/pom.xml b/flink-connectors/flink-connector-elasticsearch/pom.xml
index 93e4eb6..7785a77 100644
--- a/flink-connectors/flink-connector-elasticsearch/pom.xml
+++ b/flink-connectors/flink-connector-elasticsearch/pom.xml
@@ -65,7 +65,7 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
-		
+
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
index 8a59da9..5659ee6 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.util.Preconditions;
+
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.client.Client;
@@ -29,6 +30,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+
 import java.util.List;
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
index 2298986..bc5ac84 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.index.IndexRequest;
@@ -32,24 +33,20 @@ import java.util.Map;
  * Elasticsearch 1.x sink that requests multiple {@link ActionRequest ActionRequests}
  * against a cluster for each incoming element.
  *
- * <p>
- * When using the first constructor {@link #ElasticsearchSink(java.util.Map, ElasticsearchSinkFunction)}
+ * <p>When using the first constructor {@link #ElasticsearchSink(java.util.Map, ElasticsearchSinkFunction)}
  * the sink will create a local {@link Node} for communicating with the Elasticsearch cluster. When using the second
  * constructor {@link #ElasticsearchSink(java.util.Map, java.util.List, ElasticsearchSinkFunction)} a
  * {@link TransportClient} will be used instead.
  *
- * <p>
- * <b>Attention: </b> When using the {@code TransportClient} the sink will fail if no cluster
+ * <p><b>Attention: </b> When using the {@code TransportClient} the sink will fail if no cluster
  * can be connected to. When using the local {@code Node} for communicating, the sink will block and wait for a cluster
  * to come online.
  *
- * <p>
- * The {@link Map} passed to the constructor is used to create the {@link Node} or {@link TransportClient}. The config
+ * <p>The {@link Map} passed to the constructor is used to create the {@link Node} or {@link TransportClient}. The config
  * keys can be found in the <a href="https://www.elastic.io">Elasticsearch documentation</a>. An important setting is
  * {@code cluster.name}, which should be set to the name of the cluster that the sink should emit to.
  *
- * <p>
- * Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
+ * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
  * This will buffer elements before sending a request to the cluster. The behaviour of the
  * {@code BulkProcessor} can be configured using these config keys:
  * <ul>
@@ -59,8 +56,7 @@ import java.util.Map;
  *   settings in milliseconds
  * </ul>
  *
- * <p>
- * You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
+ * <p>You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
  * {@link ActionRequest ActionRequests} for each incoming element. See the class level documentation of
  * {@link ElasticsearchSinkFunction} for an example.
  *

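A usage sketch (not part of the commit) for the embedded-node constructor described above. The cluster name is a placeholder, the "bulk.flush.max.actions" key is recalled from the connector documentation, and SimpleStringSinkFunction stands for any ElasticsearchSinkFunction<String> implementation, e.g. the sketch shown earlier in this message.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink;

public class ElasticsearchSinkUsageSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		DataStream<String> source = env.fromElements("a", "b", "c");

		Map<String, String> userConfig = new HashMap<>();
		userConfig.put("cluster.name", "my-cluster");  // must match the target cluster; placeholder value
		userConfig.put("bulk.flush.max.actions", "1"); // flush after every element; key name assumed from the docs

		// first constructor from the Javadoc above: communicates through a local embedded Node
		source.addSink(new ElasticsearchSink<>(userConfig, new SimpleStringSinkFunction()));

		env.execute("Elasticsearch 1.x sink sketch");
	}
}
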
http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilder.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilder.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilder.java
index 18aa11e..1a93fa3 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilder.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilder.java
@@ -15,10 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.api.common.functions.Function;
 import org.apache.flink.api.common.functions.RuntimeContext;
+
 import org.elasticsearch.action.index.IndexRequest;
 
 import java.io.Serializable;
@@ -26,14 +28,12 @@ import java.io.Serializable;
 /**
  * Function that creates an {@link IndexRequest} from an element in a Stream.
  *
- * <p>
- * This is used by {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink}
+ * <p>This is used by {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink}
  * to prepare elements for sending them to Elasticsearch. See
  * <a href="https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/index_.html">Index API</a>
  * for information about how to format data for adding it to an Elasticsearch index.
  *
- * <p>
- * Example:
+ * <p>Example:
  *
  * <pre>{@code
  *     private static class MyIndexRequestBuilder implements IndexRequestBuilder<String> {

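A sketch in the spirit of the MyIndexRequestBuilder example referenced above. The single factory method createIndexRequest(T, RuntimeContext) is assumed from memory of this older interface and is not visible in the hunk; the index and type names are placeholders.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.IndexRequestBuilder;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.HashMap;
import java.util.Map;

// Sketch only: method name assumed, not taken from the diff.
public class MyIndexRequestBuilder implements IndexRequestBuilder<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public IndexRequest createIndexRequest(String element, RuntimeContext ctx) {
		Map<String, Object> json = new HashMap<>();
		json.put("data", element);

		return Requests.indexRequest()
				.index("my-index")  // placeholder index name
				.type("my-type")    // placeholder type name
				.source(json);
	}
}
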
http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
index 3a7b113..ecbebd7 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
@@ -15,15 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch;
 
-import com.google.common.collect.Lists;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.elasticsearch.testutils.SourceSinkDataTestKit;
 import org.apache.flink.streaming.connectors.elasticsearch.util.ElasticsearchUtils;
+
+import com.google.common.collect.Lists;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
@@ -36,6 +38,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+/**
+ * IT Cases for the {@link ElasticsearchSink}.
+ */
 public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/examples/ElasticsearchSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/examples/ElasticsearchSinkExample.java b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/examples/ElasticsearchSinkExample.java
index d697c3c..f181032 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/examples/ElasticsearchSinkExample.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/examples/ElasticsearchSinkExample.java
@@ -24,6 +24,7 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -42,7 +43,7 @@ import java.util.Map;
 public class ElasticsearchSinkExample {
 
 	public static void main(String[] args) throws Exception {
-		
+
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
 
 		DataStream<String> source = env.generateSequence(0, 20).map(new MapFunction<Long, String>() {
@@ -67,7 +68,6 @@ public class ElasticsearchSinkExample {
 			}
 		}));
 
-
 		env.execute("Elasticsearch Sink Example");
 	}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/pom.xml b/flink-connectors/flink-connector-elasticsearch2/pom.xml
index 7e21b8f..1f342bc 100644
--- a/flink-connectors/flink-connector-elasticsearch2/pom.xml
+++ b/flink-connectors/flink-connector-elasticsearch2/pom.xml
@@ -43,7 +43,7 @@ under the License.
 	<dependencies>
 
 		<!-- core dependencies -->
- 
+
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
index e85daf5..66b676c 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
@@ -21,6 +21,7 @@ import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallB
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.ElasticsearchUtils;
 import org.apache.flink.util.Preconditions;
+
 import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
@@ -33,6 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+
 import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.Map;
@@ -49,7 +51,7 @@ public class Elasticsearch2ApiCallBridge implements ElasticsearchApiCallBridge {
 	/**
 	 * User-provided transport addresses.
 	 *
-	 * We are using {@link InetSocketAddress} because {@link TransportAddress} is not serializable in Elasticsearch 2.x.
+	 * <p>We are using {@link InetSocketAddress} because {@link TransportAddress} is not serializable in Elasticsearch 2.x.
 	 */
 	private final List<InetSocketAddress> transportAddresses;
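To illustrate the comment above: the serializable InetSocketAddress list can only be turned into Elasticsearch transport addresses when the client is created at runtime. A hedged sketch of that conversion (the hunk's ElasticsearchUtils import suggests a helper of this kind, but the method below is illustrative, not the connector's actual code):

	import org.elasticsearch.common.transport.InetSocketTransportAddress;
	import org.elasticsearch.common.transport.TransportAddress;

	import java.net.InetSocketAddress;
	import java.util.ArrayList;
	import java.util.List;

	final class TransportAddressConversion {
		// Illustrative helper: convert the serializable addresses into ES 2.x transport addresses.
		static List<TransportAddress> convert(List<InetSocketAddress> transportAddresses) {
			List<TransportAddress> converted = new ArrayList<>(transportAddresses.size());
			for (InetSocketAddress address : transportAddresses) {
				converted.add(new InetSocketTransportAddress(address));
			}
			return converted;
		}
	}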
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
index 6d771d4..0c991a6 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
@@ -14,11 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.client.transport.TransportClient;
@@ -31,17 +33,14 @@ import java.util.Map;
  * Elasticsearch 2.x sink that requests multiple {@link ActionRequest ActionRequests}
  * against a cluster for each incoming element.
  *
- * <p>
- * The sink internally uses a {@link TransportClient} to communicate with an Elasticsearch cluster.
+ * <p>The sink internally uses a {@link TransportClient} to communicate with an Elasticsearch cluster.
  * The sink will fail if no cluster can be connected to using the provided transport addresses passed to the constructor.
  *
- * <p>
- * The {@link Map} passed to the constructor is used to create the {@code TransportClient}. The config keys can be found
+ * <p>The {@link Map} passed to the constructor is used to create the {@code TransportClient}. The config keys can be found
  * in the <a href="https://www.elastic.io">Elasticsearch documentation</a>. An important setting is {@code cluster.name},
  * which should be set to the name of the cluster that the sink should emit to.
  *
- * <p>
- * Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
+ * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
  * This will buffer elements before sending a request to the cluster. The behaviour of the
  * {@code BulkProcessor} can be configured using these config keys:
  * <ul>
@@ -51,8 +50,7 @@ import java.util.Map;
  *   settings in milliseconds
  * </ul>
  *
- * <p>
- * You also have to provide an {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction}.
+ * <p>You also have to provide an {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction}.
  * This is used to create multiple {@link ActionRequest ActionRequests} for each incoming element. See the class level
  * documentation of {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction} for an example.
  *
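Putting the class-level documentation above together, a hedged end-to-end usage sketch for the 2.x sink; the cluster name, address, bulk-flush value and index/type names are assumptions for illustration, not values from the commit:

	import org.apache.flink.api.common.functions.RuntimeContext;
	import org.apache.flink.streaming.api.datastream.DataStream;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
	import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
	import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSink;

	import org.elasticsearch.action.index.IndexRequest;
	import org.elasticsearch.client.Requests;

	import java.net.InetSocketAddress;
	import java.util.ArrayList;
	import java.util.HashMap;
	import java.util.List;
	import java.util.Map;

	public class ElasticsearchSinkUsageSketch {

		public static void main(String[] args) throws Exception {
			StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
			DataStream<String> input = env.fromElements("message-1", "message-2", "message-3");

			// Config used for the TransportClient and the BulkProcessor (assumed values).
			Map<String, String> config = new HashMap<>();
			config.put("cluster.name", "my-cluster-name");
			config.put("bulk.flush.max.actions", "1");

			// Transport addresses of the Elasticsearch 2.x nodes (assumed host/port).
			List<InetSocketAddress> transportAddresses = new ArrayList<>();
			transportAddresses.add(new InetSocketAddress("127.0.0.1", 9300));

			input.addSink(new ElasticsearchSink<>(config, transportAddresses, new ElasticsearchSinkFunction<String>() {
				@Override
				public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
					Map<String, Object> json = new HashMap<>();
					json.put("data", element);

					IndexRequest request = Requests.indexRequest()
							.index("my-index")   // assumed index name
							.type("my-type")     // assumed type name
							.source(json);
					indexer.add(request);
				}
			}));

			env.execute("Elasticsearch 2.x sink sketch");
		}
	}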

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkFunction.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkFunction.java
index c474390..74a1446 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkFunction.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkFunction.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
 import org.apache.flink.api.common.functions.Function;
@@ -24,11 +25,9 @@ import java.io.Serializable;
 /**
  * Method that creates multiple {@link org.elasticsearch.action.ActionRequest}s from an element in a Stream.
  *
- * <p>
- * This is used by {@link ElasticsearchSink} to prepare elements for sending them to Elasticsearch.
+ * <p>This is used by {@link ElasticsearchSink} to prepare elements for sending them to Elasticsearch.
  *
- * <p>
- * Example:
+ * <p>Example:
  *
  * <pre>{@code
  *					private static class TestElasticSearchSinkFunction implements
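The Javadoc example above is cut off in this hunk. A hedged sketch of such a function, assuming the 2.x-specific interface mirrors the base process(element, ctx, indexer) signature, and using made-up index names to show the "multiple requests per element" aspect:

	import org.apache.flink.api.common.functions.RuntimeContext;
	import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSinkFunction;
	import org.apache.flink.streaming.connectors.elasticsearch2.RequestIndexer;

	import org.elasticsearch.action.index.IndexRequest;
	import org.elasticsearch.client.Requests;

	import java.util.HashMap;
	import java.util.Map;

	// Hedged sketch: emits two IndexRequests per incoming element, one per (assumed) index.
	public class TestElasticSearchSinkFunction implements ElasticsearchSinkFunction<String> {

		@Override
		public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
			Map<String, Object> json = new HashMap<>();
			json.put("data", element);

			indexer.add(Requests.indexRequest().index("raw-events").type("event").source(json));
			indexer.add(Requests.indexRequest().index("event-copies").type("event").source(json));
		}
	}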

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/RequestIndexer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/RequestIndexer.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/RequestIndexer.java
index b2b3de4..ecaf984 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/RequestIndexer.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/RequestIndexer.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
 import org.elasticsearch.action.ActionRequest;

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
index ddf3bd6..db4cd8c 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSinkITCase;
+
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.node.Node;

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
index 93ac6c8..7ded893 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
@@ -14,11 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
+
 import org.junit.Test;
 
 import java.net.InetAddress;
@@ -27,6 +29,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+/**
+ * IT cases for the {@link ElasticsearchSink}.
+ */
 public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/examples/ElasticsearchSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/examples/ElasticsearchSinkExample.java b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/examples/ElasticsearchSinkExample.java
index 8c50847..c963927 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/examples/ElasticsearchSinkExample.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/examples/ElasticsearchSinkExample.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch2.examples;
 
 import org.apache.flink.api.common.functions.MapFunction;
@@ -21,6 +22,7 @@ import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSink;
+
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Requests;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
index c7d81f5..ffb572d 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
@@ -21,6 +21,7 @@ import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallB
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.ElasticsearchUtils;
 import org.apache.flink.util.Preconditions;
+
 import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
@@ -36,6 +37,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+
 import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.Map;
@@ -52,7 +54,7 @@ public class Elasticsearch5ApiCallBridge implements ElasticsearchApiCallBridge {
 	/**
 	 * User-provided transport addresses.
 	 *
-	 * We are using {@link InetSocketAddress} because {@link TransportAddress} is not serializable in Elasticsearch 5.x.
+	 * <p>We are using {@link InetSocketAddress} because {@link TransportAddress} is not serializable in Elasticsearch 5.x.
 	 */
 	private final List<InetSocketAddress> transportAddresses;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
index 61023c2..0f1cc91 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
@@ -14,12 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch5;
 
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.client.transport.TransportClient;
@@ -32,17 +34,14 @@ import java.util.Map;
  * Elasticsearch 5.x sink that requests multiple {@link ActionRequest ActionRequests}
  * against a cluster for each incoming element.
  *
- * <p>
- * The sink internally uses a {@link TransportClient} to communicate with an Elasticsearch cluster.
+ * <p>The sink internally uses a {@link TransportClient} to communicate with an Elasticsearch cluster.
  * The sink will fail if no cluster can be connected to using the provided transport addresses passed to the constructor.
  *
- * <p>
- * The {@link Map} passed to the constructor is used to create the {@code TransportClient}. The config keys can be found
+ * <p>The {@link Map} passed to the constructor is used to create the {@code TransportClient}. The config keys can be found
  * in the <a href="https://www.elastic.io">Elasticsearch documentation</a>. An important setting is {@code cluster.name},
  * which should be set to the name of the cluster that the sink should emit to.
  *
- * <p>
- * Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
+ * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
  * This will buffer elements before sending a request to the cluster. The behaviour of the
  * {@code BulkProcessor} can be configured using these config keys:
  * <ul>
@@ -52,8 +51,7 @@ import java.util.Map;
  *   settings in milliseconds
  * </ul>
  *
- * <p>
- * You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
+ * <p>You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
  * {@link ActionRequest ActionRequests} for each incoming element. See the class level documentation of
  * {@link ElasticsearchSinkFunction} for an example.
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
index f3d8897..16e292d 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
@@ -19,6 +19,7 @@
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.streaming.connectors.elasticsearch5.ElasticsearchSinkITCase;
+
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
index 3ebda52..ad7c664 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
@@ -15,11 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch5;
 
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
+
 import org.junit.Test;
 
 import java.net.InetAddress;
@@ -28,6 +30,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+/**
+ * IT cases for the {@link ElasticsearchSink}.
+ */
 public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/c20b396f/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/examples/ElasticsearchSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/examples/ElasticsearchSinkExample.java b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/examples/ElasticsearchSinkExample.java
index 4135283..22c1053 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/examples/ElasticsearchSinkExample.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/examples/ElasticsearchSinkExample.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.elasticsearch5.examples;
 
 import org.apache.flink.api.common.functions.MapFunction;
@@ -23,6 +24,7 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 import org.apache.flink.streaming.connectors.elasticsearch5.ElasticsearchSink;
+
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Requests;
 


[10/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kafka*

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
index 1c87542..82294d7 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
@@ -33,7 +33,6 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,7 +43,7 @@ import java.util.Properties;
 
 /**
  * A fetcher that fetches data from Kafka brokers via the Kafka 0.9 consumer API.
- * 
+ *
  * @param <T> The type of elements produced by the fetcher.
  */
 public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
@@ -53,16 +52,16 @@ public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
 
 	// ------------------------------------------------------------------------
 
-	/** The schema to convert between Kafka's byte messages, and Flink's objects */
+	/** The schema to convert between Kafka's byte messages, and Flink's objects. */
 	private final KeyedDeserializationSchema<T> deserializer;
 
-	/** The handover of data and exceptions between the consumer thread and the task thread */
+	/** The handover of data and exceptions between the consumer thread and the task thread. */
 	private final Handover handover;
 
-	/** The thread that runs the actual KafkaConsumer and hand the record batches to this fetcher */
+	/** The thread that runs the actual KafkaConsumer and hand the record batches to this fetcher. */
 	private final KafkaConsumerThread consumerThread;
 
-	/** Flag to mark the main work loop as alive */
+	/** Flag to mark the main work loop as alive. */
 	private volatile boolean running = true;
 
 	// ------------------------------------------------------------------------
@@ -80,8 +79,7 @@ public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
 			KeyedDeserializationSchema<T> deserializer,
 			Properties kafkaProperties,
 			long pollTimeout,
-			boolean useMetrics) throws Exception
-	{
+			boolean useMetrics) throws Exception {
 		super(
 				sourceContext,
 				assignedPartitionsWithInitialOffsets,
@@ -97,7 +95,7 @@ public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
 
 		final MetricGroup kafkaMetricGroup = metricGroup.addGroup("KafkaConsumer");
 		addOffsetStateGauge(kafkaMetricGroup);
-		
+
 		this.consumerThread = new KafkaConsumerThread(
 				LOG,
 				handover,
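The field comments in this hunk describe the handover between the KafkaConsumer thread and the fetcher's task thread. Purely as an illustration of that pattern (this is not the connector's Handover class and does not use its API), a minimal single-slot handover could look like this:

	// Illustrative only: a minimal single-slot handover between the polling thread and the
	// task thread. The producer blocks until the previous batch was picked up; exceptions
	// are handed over the same way as data so the task thread sees them on its next poll.
	public final class SimpleHandover<T> {

		private final Object lock = new Object();
		private T next;
		private Throwable error;

		/** Called by the consumer-polling thread; blocks while the slot is still full. */
		public void produce(T element) throws InterruptedException {
			synchronized (lock) {
				while (next != null && error == null) {
					lock.wait();
				}
				next = element;
				lock.notifyAll();
			}
		}

		/** Called by the task thread; blocks until data or an error is available. */
		public T pollNext() throws Exception {
			synchronized (lock) {
				while (next == null && error == null) {
					lock.wait();
				}
				if (error != null) {
					throw new Exception("handover closed exceptionally", error);
				}
				T result = next;
				next = null;
				lock.notifyAll();
				return result;
			}
		}

		/** Hands an exception to the task thread instead of data. */
		public void reportError(Throwable t) {
			synchronized (lock) {
				error = t;
				lock.notifyAll();
			}
		}
	}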

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
index 37ba34c..c0b9441 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
@@ -25,11 +25,11 @@ import java.util.List;
 
 /**
  * The ConsumerCallBridge simply calls methods on the {@link KafkaConsumer}.
- * 
- * This indirection is necessary, because Kafka broke binary compatibility between 0.9 and 0.10,
+ *
+ * <p>This indirection is necessary, because Kafka broke binary compatibility between 0.9 and 0.10,
  * for example changing {@code assign(List)} to {@code assign(Collection)}.
- * 
- * Because of that, we need to have two versions whose compiled code goes against different method signatures.
+ *
+ * <p>Because of that, we need to have two versions whose compiled code goes against different method signatures.
  * Even though the source of subclasses may look identical, the byte code will be different, because they
  * are compiled against different dependencies.
  */
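To illustrate the indirection described above (a hedged sketch, not the connector's actual bridge class): the 0.9 module compiles the call site against assign(List), while a 0.10 module would compile source-identical code against assign(Collection):

	import org.apache.kafka.clients.consumer.KafkaConsumer;
	import org.apache.kafka.common.TopicPartition;

	import java.util.List;

	// Hedged sketch of the call-bridge idea: one compiled call site per Kafka client version.
	public class MyConsumerCallBridge {

		// Compiled against the Kafka 0.9 client, where the signature is assign(List<TopicPartition>).
		public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> partitions) {
			consumer.assign(partitions);
		}
	}

	// A 0.10 variant would be source-identical but compiled against the 0.10 client, where
	// the method is assign(Collection<TopicPartition>), so its byte code differs:
	//
	//   public class MyConsumerCallBridge010 extends MyConsumerCallBridge {
	//       @Override
	//       public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> partitions) {
	//           consumer.assign(partitions);
	//       }
	//   }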

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
index cbe1551..0c5482a 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
@@ -22,6 +22,7 @@ import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionState;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionStateSentinel;
 import org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaMetricWrapper;
+
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
@@ -30,7 +31,6 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.WakeupException;
-
 import org.slf4j.Logger;
 
 import java.util.ArrayList;
@@ -45,54 +45,53 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * The thread the runs the {@link KafkaConsumer}, connecting to the brokers and polling records.
  * The thread pushes the data into a {@link Handover} to be picked up by the fetcher that will
  * deserialize and emit the records.
- * 
+ *
  * <p><b>IMPORTANT:</b> This thread must not be interrupted when attempting to shut it down.
  * The Kafka consumer code was found to not always handle interrupts well, and to even
  * deadlock in certain situations.
- * 
+ *
  * <p>Implementation Note: This code is written to be reusable in later versions of the KafkaConsumer.
  * Because Kafka is not maintaining binary compatibility, we use a "call bridge" as an indirection
  * to the KafkaConsumer calls that change signature.
  */
 public class KafkaConsumerThread extends Thread {
 
-	/** Logger for this consumer */
+	/** Logger for this consumer. */
 	private final Logger log;
 
-	/** The handover of data and exceptions between the consumer thread and the task thread */
+	/** The handover of data and exceptions between the consumer thread and the task thread. */
 	private final Handover handover;
 
-	/** The next offsets that the main thread should commit */
+	/** The next offsets that the main thread should commit. */
 	private final AtomicReference<Map<TopicPartition, OffsetAndMetadata>> nextOffsetsToCommit;
 
-	/** The configuration for the Kafka consumer */
+	/** The configuration for the Kafka consumer. */
 	private final Properties kafkaProperties;
 
-	/** The partitions that this consumer reads from */ 
+	/** The partitions that this consumer reads from. */
 	private final KafkaTopicPartitionState<TopicPartition>[] subscribedPartitionStates;
 
 	/** We get this from the outside to publish metrics. **/
 	private final MetricGroup kafkaMetricGroup;
 
-	/** The indirections on KafkaConsumer methods, for cases where KafkaConsumer compatibility is broken */
+	/** The indirections on KafkaConsumer methods, for cases where KafkaConsumer compatibility is broken. */
 	private final KafkaConsumerCallBridge consumerCallBridge;
 
-	/** The maximum number of milliseconds to wait for a fetch batch */
+	/** The maximum number of milliseconds to wait for a fetch batch. */
 	private final long pollTimeout;
 
-	/** Flag whether to add Kafka's metrics to the Flink metrics */
+	/** Flag whether to add Kafka's metrics to the Flink metrics. */
 	private final boolean useMetrics;
 
-	/** Reference to the Kafka consumer, once it is created */
+	/** Reference to the Kafka consumer, once it is created. */
 	private volatile KafkaConsumer<byte[], byte[]> consumer;
 
-	/** Flag to mark the main work loop as alive */
+	/** Flag to mark the main work loop as alive. */
 	private volatile boolean running;
 
-	/** Flag tracking whether the latest commit request has completed */
+	/** Flag tracking whether the latest commit request has completed. */
 	private volatile boolean commitInProgress;
 
-
 	public KafkaConsumerThread(
 			Logger log,
 			Handover handover,
@@ -271,7 +270,7 @@ public class KafkaConsumerThread extends Thread {
 		// this wakes up the consumer if it is blocked handing over records
 		handover.wakeupProducer();
 
-		// this wakes up the consumer if it is blocked in a kafka poll 
+		// this wakes up the consumer if it is blocked in a kafka poll
 		if (consumer != null) {
 			consumer.wakeup();
 		}
@@ -280,11 +279,11 @@ public class KafkaConsumerThread extends Thread {
 	/**
 	 * Tells this thread to commit a set of offsets. This method does not block, the committing
 	 * operation will happen asynchronously.
-	 * 
+	 *
 	 * <p>Only one commit operation may be pending at any time. If the committing takes longer than
 	 * the frequency with which this method is called, then some commits may be skipped due to being
 	 * superseded  by newer ones.
-	 * 
+	 *
 	 * @param offsetsToCommit The offsets to commit
 	 */
 	public void setOffsetsToCommit(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) {
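The method whose signature opens above implements the "only one pending commit, newer requests supersede older ones" behaviour described in its Javadoc. A hedged sketch of that hand-off pattern in isolation (not the connector's actual code), using an AtomicReference like the nextOffsetsToCommit field shown earlier in this hunk:

	import org.apache.kafka.clients.consumer.OffsetAndMetadata;
	import org.apache.kafka.common.TopicPartition;

	import java.util.Map;
	import java.util.concurrent.atomic.AtomicReference;

	// Hedged sketch: non-blocking "latest commit wins" hand-off between the checkpointing
	// thread and the consumer thread's poll loop.
	public class OffsetCommitSlot {

		private final AtomicReference<Map<TopicPartition, OffsetAndMetadata>> nextOffsetsToCommit =
				new AtomicReference<>();

		/** Never blocks; an older request that was not yet picked up is overwritten, i.e. skipped. */
		public void setOffsetsToCommit(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) {
			nextOffsetsToCommit.set(offsetsToCommit);
		}

		/** Called from the consumer thread's loop: returns and clears the pending request, if any. */
		public Map<TopicPartition, OffsetAndMetadata> pollPendingCommit() {
			return nextOffsetsToCommit.getAndSet(null);
		}
	}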

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/resources/log4j.properties b/flink-connectors/flink-connector-kafka-0.9/src/main/resources/log4j.properties
index 6bdfb48..6eef174 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/resources/log4j.properties
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/resources/log4j.properties
@@ -26,4 +26,3 @@ log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
 # suppress the irrelevant (wrong) warnings from the netty channel handler
 log4j.logger.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, testlogger
 
-

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSourceTest.java
index eff8264..5e3c42c 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.util.serialization.AvroRowDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka09AvroTableSource}.
+ */
 public class Kafka09AvroTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09FetcherTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09FetcherTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09FetcherTest.java
index 6e13db2..f55c264 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09FetcherTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09FetcherTest.java
@@ -39,10 +39,8 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.consumer.OffsetCommitCallback;
 import org.apache.kafka.common.TopicPartition;
-
 import org.junit.Test;
 import org.junit.runner.RunWith;
-
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -63,7 +61,6 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyLong;
 import static org.powermock.api.mockito.PowerMockito.doAnswer;
@@ -91,10 +88,10 @@ public class Kafka09FetcherTest {
 
 		// ----- the mock consumer with blocking poll calls ----
 		final MultiShotLatch blockerLatch = new MultiShotLatch();
-		
+
 		KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
 		when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {
-			
+
 			@Override
 			public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
 				sync.trigger();
@@ -157,7 +154,7 @@ public class Kafka09FetcherTest {
 		sync.await();
 
 		// ----- trigger the offset commit -----
-		
+
 		final AtomicReference<Throwable> commitError = new AtomicReference<>();
 		final Thread committer = new Thread("committer runner") {
 			@Override
@@ -192,11 +189,11 @@ public class Kafka09FetcherTest {
 
 	@Test
 	public void ensureOffsetsGetCommitted() throws Exception {
-		
+
 		// test data
 		final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
 		final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
-		
+
 		final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
 		testCommitData1.put(testPartition1, 11L);
 		testCommitData1.put(testPartition2, 18L);
@@ -207,7 +204,6 @@ public class Kafka09FetcherTest {
 
 		final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
 
-
 		// ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls ----
 
 		final MultiShotLatch blockerLatch = new MultiShotLatch();
@@ -234,7 +230,7 @@ public class Kafka09FetcherTest {
 			@Override
 			public Void answer(InvocationOnMock invocation) {
 				@SuppressWarnings("unchecked")
-				Map<TopicPartition, OffsetAndMetadata> offsets = 
+				Map<TopicPartition, OffsetAndMetadata> offsets =
 						(Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
 
 				OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
@@ -242,7 +238,7 @@ public class Kafka09FetcherTest {
 				commitStore.add(offsets);
 				callback.onComplete(offsets, null);
 
-				return null; 
+				return null;
 			}
 		}).when(mockConsumer).commitAsync(
 				Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
@@ -322,7 +318,7 @@ public class Kafka09FetcherTest {
 				assertEquals(27L, entry.getValue().offset());
 			}
 		}
-		
+
 		// ----- test done, wait till the fetcher is done for a clean shutdown -----
 		fetcher.cancel();
 		fetcherRunner.join();
@@ -387,7 +383,6 @@ public class Kafka09FetcherTest {
 				0L,
 				false);
 
-
 		// ----- run the fetcher -----
 
 		final AtomicReference<Throwable> error = new AtomicReference<>();

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ITCase.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ITCase.java
index ca9965c..de4d010 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ITCase.java
@@ -19,6 +19,9 @@ package org.apache.flink.streaming.connectors.kafka;
 
 import org.junit.Test;
 
+/**
+ * IT cases for Kafka 0.9 .
+ */
 public class Kafka09ITCase extends KafkaConsumerTestBase {
 
 	// ------------------------------------------------------------------------
@@ -35,7 +38,6 @@ public class Kafka09ITCase extends KafkaConsumerTestBase {
 		runSimpleConcurrentProducerConsumerTopology();
 	}
 
-
 	@Test(timeout = 60000)
 	public void testKeyValueSupport() throws Exception {
 		runKeyValueTest();
@@ -58,7 +60,6 @@ public class Kafka09ITCase extends KafkaConsumerTestBase {
 		runFailOnDeployTest();
 	}
 
-
 	// --- source to partition mappings and exactly once ---
 
 	@Test(timeout = 60000)

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
index 3afb5e4..c8fb4cd 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
@@ -15,15 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.util.serialization.JsonRowSerializationSchema;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 
+/**
+ * Tests for the {@link Kafka09JsonTableSink}.
+ */
 public class Kafka09JsonTableSinkTest extends KafkaTableSinkTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSourceTest.java
index 35cd9ce..ec70386 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
+import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka09JsonTableSource}.
+ */
 public class Kafka09JsonTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ProducerITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ProducerITCase.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ProducerITCase.java
index ae4f5b2..fe8a1a5 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ProducerITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09ProducerITCase.java
@@ -18,9 +18,11 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-
 import org.junit.Test;
 
+/**
+ * IT cases for the {@link FlinkKafkaProducer09}.
+ */
 @SuppressWarnings("serial")
 public class Kafka09ProducerITCase extends KafkaProducerTestBase {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09SecuredRunITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09SecuredRunITCase.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09SecuredRunITCase.java
index 16a13c0..d41cd91 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09SecuredRunITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09SecuredRunITCase.java
@@ -18,16 +18,15 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.test.util.SecureTestEnvironment;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-
-/*
- * Kafka Secure Connection (kerberos) IT test case
+/**
+ * Kafka Secure Connection (kerberos) IT test case.
  */
 public class Kafka09SecuredRunITCase extends KafkaConsumerTestBase {
 
@@ -51,7 +50,6 @@ public class Kafka09SecuredRunITCase extends KafkaConsumerTestBase {
 		SecureTestEnvironment.cleanup();
 	}
 
-
 	//timeout interval is large since in Travis, ZK connection timeout occurs frequently
 	//The timeout for the test case is 2 times timeout of ZK connection
 	@Test(timeout = 600000)

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
index e9a4947..6b6c43f 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTest.java
@@ -20,9 +20,9 @@ package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.api.operators.StreamSink;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
-import org.apache.flink.streaming.connectors.kafka.testutils.FakeStandardProducerConfig;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import org.apache.flink.util.TestLogger;
 
@@ -31,13 +31,10 @@ import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.PartitionInfo;
-
 import org.junit.Test;
 import org.junit.runner.RunWith;
-
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
@@ -53,17 +50,20 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.powermock.api.mockito.PowerMockito.whenNew;
 
+/**
+ * Tests for the {@link KafkaProducer}.
+ */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(FlinkKafkaProducerBase.class)
 public class KafkaProducerTest extends TestLogger {
-	
+
 	@Test
 	@SuppressWarnings("unchecked")
 	public void testPropagateExceptions() {
 		try {
 			// mock kafka producer
 			KafkaProducer<?, ?> kafkaProducerMock = mock(KafkaProducer.class);
-			
+
 			// partition setup
 			when(kafkaProducerMock.partitionsFor(anyString())).thenReturn(
 				// returning a unmodifiable list to mimic KafkaProducer#partitionsFor() behaviour
@@ -79,13 +79,13 @@ public class KafkaProducerTest extends TestLogger {
 						return null;
 					}
 				});
-			
+
 			// make sure the FlinkKafkaProducer instantiates our mock producer
 			whenNew(KafkaProducer.class).withAnyArguments().thenReturn(kafkaProducerMock);
-			
+
 			// (1) producer that propagates errors
 			FlinkKafkaProducer09<String> producerPropagating = new FlinkKafkaProducer09<>(
-					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner<String>)null);
+					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner<String>) null);
 
 			OneInputStreamOperatorTestHarness<String, Object> testHarness =
 					new OneInputStreamOperatorTestHarness<>(new StreamSink(producerPropagating));
@@ -106,7 +106,7 @@ public class KafkaProducerTest extends TestLogger {
 			// (2) producer that only logs errors
 
 			FlinkKafkaProducer09<String> producerLogging = new FlinkKafkaProducer09<>(
-					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner<String>)null);
+					"mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), (FlinkKafkaPartitioner<String>) null);
 			producerLogging.setLogFailuresOnly(true);
 
 			testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink(producerLogging));

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
index 84fdbf8..fc38e24 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
@@ -15,9 +15,17 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.operators.StreamSink;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
+import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
+import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
+import org.apache.flink.util.NetUtils;
+
 import kafka.admin.AdminUtils;
 import kafka.api.PartitionMetadata;
 import kafka.common.KafkaException;
@@ -29,21 +37,12 @@ import kafka.utils.ZkUtils;
 import org.I0Itec.zkclient.ZkClient;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingServer;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.datastream.DataStreamSink;
-import org.apache.flink.streaming.api.operators.StreamSink;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
-import org.apache.flink.util.NetUtils;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.protocol.SecurityProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.Seq;
 
 import java.io.File;
 import java.net.BindException;
@@ -54,12 +53,14 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 
+import scala.collection.Seq;
+
 import static org.apache.flink.util.NetUtils.hostAndPortToUrlString;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * An implementation of the KafkaServerProvider for Kafka 0.9
+ * An implementation of the KafkaServerProvider for Kafka 0.9 .
  */
 public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
@@ -166,7 +167,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	public void prepare(int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
 
 		//increase the timeout since in Travis ZK connection takes long time for secure connection.
-		if(secureMode) {
+		if (secureMode) {
 			//run only one kafka server to avoid multiple ZK connections from many instances - Travis timeout
 			numKafkaServers = 1;
 			zkTimeout = String.valueOf(Integer.parseInt(zkTimeout) * 15);
@@ -205,7 +206,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 				brokers.add(getKafkaServer(i, tmpKafkaDirs.get(i)));
 
 				SocketServer socketServer = brokers.get(i).socketServer();
-				if(secureMode) {
+				if (secureMode) {
 					brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.SASL_PLAINTEXT)) + ",";
 				} else {
 					brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.PLAINTEXT)) + ",";
@@ -298,7 +299,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		final long deadline = System.nanoTime() + Integer.parseInt(zkTimeout) * 1_000_000L;
 		do {
 			try {
-				if(secureMode) {
+				if (secureMode) {
 					//increase wait time since in Travis ZK timeout occurs frequently
 					int wait = Integer.parseInt(zkTimeout) / 100;
 					LOG.info("waiting for {} msecs before the topic {} can be checked", wait, topic);
@@ -317,7 +318,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
 			// create a new ZK utils connection
 			ZkUtils checkZKConn = getZkUtils();
-			if(AdminUtils.topicExists(checkZKConn, topic)) {
+			if (AdminUtils.topicExists(checkZKConn, topic)) {
 				LOG.info("topic {} has been created successfully", topic);
 				checkZKConn.close();
 				return;
@@ -347,7 +348,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	}
 
 	/**
-	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed)
+	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 	 */
 	protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
 		Properties kafkaProperties = new Properties();
@@ -363,7 +364,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		// for CI stability, increase zookeeper session timeout
 		kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
 		kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
-		if(additionalServerProperties != null) {
+		if (additionalServerProperties != null) {
 			kafkaProperties.putAll(additionalServerProperties);
 		}
 
@@ -374,7 +375,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			kafkaProperties.put("port", Integer.toString(kafkaPort));
 
 			//to support secure kafka cluster
-			if(secureMode) {
+			if (secureMode) {
 				LOG.info("Adding Kafka secure configurations");
 				kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
 				kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
@@ -405,7 +406,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
 	public Properties getSecureProperties() {
 		Properties prop = new Properties();
-		if(secureMode) {
+		if (secureMode) {
 			prop.put("security.inter.broker.protocol", "SASL_PLAINTEXT");
 			prop.put("security.protocol", "SASL_PLAINTEXT");
 			prop.put("sasl.kerberos.service.name", "kafka");
@@ -413,7 +414,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			//add special timeout for Travis
 			prop.setProperty("zookeeper.session.timeout.ms", zkTimeout);
 			prop.setProperty("zookeeper.connection.timeout.ms", zkTimeout);
-			prop.setProperty("metadata.fetch.timeout.ms","120000");
+			prop.setProperty("metadata.fetch.timeout.ms", "120000");
 		}
 		return prop;
 	}
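
The broker-side secure settings above have a client-side counterpart. A rough sketch follows; the broker address, group id, timeout values and the external Kerberos/JAAS setup are assumptions and must match the actual environment.

import java.util.Properties;

public class SecureClientPropertiesSketch {

	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
		props.setProperty("group.id", "example-group");           // assumed consumer group
		props.setProperty("security.protocol", "SASL_PLAINTEXT");
		props.setProperty("sasl.kerberos.service.name", "kafka");
		// Generous ZooKeeper timeouts, mirroring what the test environment sets for CI.
		props.setProperty("zookeeper.session.timeout.ms", "30000");
		props.setProperty("zookeeper.connection.timeout.ms", "30000");
		System.out.println(props);
	}
}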

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/internal/HandoverTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/internal/HandoverTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/internal/HandoverTest.java
index e95b51b..5bd4aff 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/internal/HandoverTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/internal/HandoverTest.java
@@ -20,8 +20,8 @@ package org.apache.flink.streaming.connectors.kafka.internal;
 
 import org.apache.flink.streaming.connectors.kafka.internal.Handover.WakeupException;
 import org.apache.flink.util.ExceptionUtils;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
 
+import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -34,7 +34,7 @@ import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 
 /**
- * Tests for the {@link Handover} between Kafka Consumer Thread and the fetcher's main thread. 
+ * Tests for the {@link Handover} between Kafka Consumer Thread and the fetcher's main thread.
  */
 public class HandoverTest {
 
@@ -219,7 +219,7 @@ public class HandoverTest {
 
 		// empty the handover
 		assertNotNull(handover.pollNext());
-		
+
 		// producing into an empty handover should work
 		try {
 			handover.produce(createTestRecords());
@@ -292,7 +292,7 @@ public class HandoverTest {
 
 	// ------------------------------------------------------------------------
 
-	private static abstract class CheckedThread extends Thread {
+	private abstract static class CheckedThread extends Thread {
 
 		private volatile Throwable error;
 
@@ -317,7 +317,7 @@ public class HandoverTest {
 
 		public void waitUntilThreadHoldsLock(long timeoutMillis) throws InterruptedException, TimeoutException {
 			final long deadline = System.nanoTime() + timeoutMillis * 1_000_000;
-			
+
 			while (!isBlockedOrWaiting() && (System.nanoTime() < deadline)) {
 				Thread.sleep(1);
 			}

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.9/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/resources/log4j-test.properties b/flink-connectors/flink-connector-kafka-0.9/src/test/resources/log4j-test.properties
index 4ac1773..bc93a2d 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/resources/log4j-test.properties
@@ -29,4 +29,4 @@ log4j.logger.org.apache.zookeeper=OFF, testlogger
 log4j.logger.state.change.logger=OFF, testlogger
 log4j.logger.kafka=OFF, testlogger
 
-log4j.logger.org.apache.directory=OFF, testlogger
\ No newline at end of file
+log4j.logger.org.apache.directory=OFF, testlogger

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/pom.xml b/flink-connectors/flink-connector-kafka-base/pom.xml
index 2cc94b0..fc0045e 100644
--- a/flink-connectors/flink-connector-kafka-base/pom.xml
+++ b/flink-connectors/flink-connector-kafka-base/pom.xml
@@ -106,7 +106,7 @@ under the License.
 		</dependency>
 
 		<!-- test dependencies -->
-		
+
 		<!-- force using the latest zkclient -->
 		<dependency>
 			<groupId>com.101tec</groupId>
@@ -187,7 +187,6 @@ under the License.
 			</dependency>
 		</dependencies>
 	</dependencyManagement>
-	
 
 	<build>
 		<plugins>
@@ -233,5 +232,5 @@ under the License.
 			</plugin>
 		</plugins>
 	</build>
-	
+
 </project>

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
index 87bedce..18748d0 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
@@ -17,7 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.commons.collections.map.LinkedMap;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.OperatorStateStore;
@@ -46,6 +45,8 @@ import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.Preconditions;
 import org.apache.flink.util.SerializedValue;
+
+import org.apache.commons.collections.map.LinkedMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,13 +61,13 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * Base class of all Flink Kafka Consumer data sources.
  * This implements the common behavior across all Kafka versions.
- * 
+ *
  * <p>The Kafka version specific behavior is defined mainly in the specific subclasses of the
  * {@link AbstractFetcher}.
- * 
+ *
  * @param <T> The type of records produced by this data source
  */
-public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFunction<T> implements 
+public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFunction<T> implements
 		CheckpointListener,
 		ResultTypeQueryable<T>,
 		CheckpointedFunction,
@@ -75,11 +76,11 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	private static final long serialVersionUID = -6272159445203409112L;
 
 	protected static final Logger LOG = LoggerFactory.getLogger(FlinkKafkaConsumerBase.class);
-	
-	/** The maximum number of pending non-committed checkpoints to track, to avoid memory leaks */
+
+	/** The maximum number of pending non-committed checkpoints to track, to avoid memory leaks. */
 	public static final int MAX_NUM_PENDING_CHECKPOINTS = 100;
 
-	/** Boolean configuration key to disable metrics tracking **/
+	/** Boolean configuration key to disable metrics tracking. **/
 	public static final String KEY_DISABLE_METRICS = "flink.disable-metrics";
 
 	// ------------------------------------------------------------------------
@@ -87,20 +88,20 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	// ------------------------------------------------------------------------
 
 	private final List<String> topics;
-	
-	/** The schema to convert between Kafka's byte messages, and Flink's objects */
+
+	/** The schema to convert between Kafka's byte messages, and Flink's objects. */
 	protected final KeyedDeserializationSchema<T> deserializer;
 
-	/** The set of topic partitions that the source will read, with their initial offsets to start reading from */
+	/** The set of topic partitions that the source will read, with their initial offsets to start reading from. */
 	private Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets;
-	
+
 	/** Optional timestamp extractor / watermark generator that will be run per Kafka partition,
 	 * to exploit per-partition timestamp characteristics.
 	 * The assigner is kept in serialized form, to deserialize it into multiple copies */
 	private SerializedValue<AssignerWithPeriodicWatermarks<T>> periodicWatermarkAssigner;
-	
+
 	/** Optional timestamp extractor / watermark generator that will be run per Kafka partition,
-	 * to exploit per-partition timestamp characteristics. 
+	 * to exploit per-partition timestamp characteristics.
 	 * The assigner is kept in serialized form, to deserialize it into multiple copies */
 	private SerializedValue<AssignerWithPunctuatedWatermarks<T>> punctuatedWatermarkAssigner;
 
@@ -119,26 +120,26 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 */
 	private OffsetCommitMode offsetCommitMode;
 
-	/** The startup mode for the consumer (default is {@link StartupMode#GROUP_OFFSETS}) */
+	/** The startup mode for the consumer (default is {@link StartupMode#GROUP_OFFSETS}). */
 	private StartupMode startupMode = StartupMode.GROUP_OFFSETS;
 
-	/** Specific startup offsets; only relevant when startup mode is {@link StartupMode#SPECIFIC_OFFSETS} */
+	/** Specific startup offsets; only relevant when startup mode is {@link StartupMode#SPECIFIC_OFFSETS}. */
 	protected Map<KafkaTopicPartition, Long> specificStartupOffsets;
 
 	// ------------------------------------------------------------------------
-	//  runtime state (used individually by each parallel subtask) 
+	//  runtime state (used individually by each parallel subtask)
 	// ------------------------------------------------------------------------
-	
-	/** Data for pending but uncommitted offsets */
+
+	/** Data for pending but uncommitted offsets. */
 	private final LinkedMap pendingOffsetsToCommit = new LinkedMap();
 
-	/** The fetcher implements the connections to the Kafka brokers */
+	/** The fetcher implements the connections to the Kafka brokers. */
 	private transient volatile AbstractFetcher<T, ?> kafkaFetcher;
-	
-	/** The offsets to restore to, if the consumer restores state from a checkpoint */
+
+	/** The offsets to restore to, if the consumer restores state from a checkpoint. */
 	private transient volatile HashMap<KafkaTopicPartition, Long> restoredState;
-	
-	/** Flag indicating whether the consumer is still running **/
+
+	/** Flag indicating whether the consumer is still running. **/
 	private volatile boolean running = true;
 
 	// ------------------------------------------------------------------------
@@ -158,30 +159,30 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	// ------------------------------------------------------------------------
 	//  Configuration
 	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * Specifies an {@link AssignerWithPunctuatedWatermarks} to emit watermarks in a punctuated manner.
 	 * The watermark extractor will run per Kafka partition, watermarks will be merged across partitions
 	 * in the same way as in the Flink runtime, when streams are merged.
-	 * 
+	 *
 	 * <p>When a subtask of a FlinkKafkaConsumer source reads multiple Kafka partitions,
 	 * the streams from the partitions are unioned in a "first come first serve" fashion. Per-partition
 	 * characteristics are usually lost that way. For example, if the timestamps are strictly ascending
 	 * per Kafka partition, they will not be strictly ascending in the resulting Flink DataStream, if the
 	 * parallel source subtask reads more than one partition.
-	 * 
+	 *
 	 * <p>Running timestamp extractors / watermark generators directly inside the Kafka source, per Kafka
 	 * partition, allows users to let them exploit the per-partition characteristics.
-	 * 
+	 *
 	 * <p>Note: One can use either an {@link AssignerWithPunctuatedWatermarks} or an
 	 * {@link AssignerWithPeriodicWatermarks}, not both at the same time.
-	 * 
+	 *
 	 * @param assigner The timestamp assigner / watermark generator to use.
 	 * @return The consumer object, to allow function chaining.
 	 */
 	public FlinkKafkaConsumerBase<T> assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks<T> assigner) {
 		checkNotNull(assigner);
-		
+
 		if (this.periodicWatermarkAssigner != null) {
 			throw new IllegalStateException("A periodic watermark emitter has already been set.");
 		}
@@ -216,7 +217,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 */
 	public FlinkKafkaConsumerBase<T> assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks<T> assigner) {
 		checkNotNull(assigner);
-		
+
 		if (this.punctuatedWatermarkAssigner != null) {
 			throw new IllegalStateException("A punctuated watermark emitter has already been set.");
 		}
@@ -232,7 +233,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	/**
 	 * Specifies whether or not the consumer should commit offsets back to Kafka on checkpoints.
 	 *
-	 * This setting will only have effect if checkpointing is enabled for the job.
+	 * <p>This setting will only have effect if checkpointing is enabled for the job.
 	 * If checkpointing isn't enabled, only the "auto.commit.enable" (for 0.8) / "enable.auto.commit" (for 0.9+)
 	 * property settings will be used.
 	 *
@@ -247,7 +248,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 * Specifies the consumer to start reading from the earliest offset for all partitions.
 	 * This lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
 	 *
-	 * This method does not effect where partitions are read from when the consumer is restored
+	 * <p>This method does not affect where partitions are read from when the consumer is restored
 	 * from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
 	 * savepoint, only the offsets in the restored state will be used.
 	 *
@@ -263,7 +264,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 * Specifies the consumer to start reading from the latest offset for all partitions.
 	 * This lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
 	 *
-	 * This method does not effect where partitions are read from when the consumer is restored
+	 * <p>This method does not affect where partitions are read from when the consumer is restored
 	 * from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
 	 * savepoint, only the offsets in the restored state will be used.
 	 *
@@ -281,7 +282,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 * properties. If no offset can be found for a partition, the behaviour in "auto.offset.reset"
 	 * set in the configuration properties will be used for the partition.
 	 *
-	 * This method does not effect where partitions are read from when the consumer is restored
+	 * <p>This method does not affect where partitions are read from when the consumer is restored
 	 * from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
 	 * savepoint, only the offsets in the restored state will be used.
 	 *
@@ -298,16 +299,16 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	 * The specified offset should be the offset of the next record that will be read from partitions.
 	 * This lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
 	 *
-	 * If the provided map of offsets contains entries whose {@link KafkaTopicPartition} is not subscribed by the
+	 * <p>If the provided map of offsets contains entries whose {@link KafkaTopicPartition} is not subscribed by the
 	 * consumer, the entry will be ignored. If the consumer subscribes to a partition that does not exist in the provided
 	 * map of offsets, the consumer will fallback to the default group offset behaviour (see
 	 * {@link FlinkKafkaConsumerBase#setStartFromGroupOffsets()}) for that particular partition.
 	 *
-	 * If the specified offset for a partition is invalid, or the behaviour for that partition is defaulted to group
+	 * <p>If the specified offset for a partition is invalid, or the behaviour for that partition is defaulted to group
 	 * offsets but still no group offset could be found for it, then the "auto.offset.reset" behaviour set in the
 	 * configuration properties will be used for the partition
 	 *
-	 * This method does not effect where partitions are read from when the consumer is restored
+	 * <p>This method does not effect where partitions are read from when the consumer is restored
 	 * from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
 	 * savepoint, only the offsets in the restored state will be used.
 	 *
@@ -444,7 +445,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 			if (!running) {
 				return;
 			}
-			
+
 			// (3) run the fetcher' main work method
 			fetcher.runFetchLoop();
 		}
@@ -476,7 +477,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	public void cancel() {
 		// set ourselves as not running
 		running = false;
-		
+
 		// abort the fetcher, if there is one
 		if (kafkaFetcher != null) {
 			kafkaFetcher.cancel();
@@ -494,7 +495,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 			super.close();
 		}
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Checkpoint and restore
 	// ------------------------------------------------------------------------
@@ -635,19 +636,19 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	// ------------------------------------------------------------------------
 	//  Kafka Consumer specific methods
 	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * Creates the fetcher that connects to the Kafka brokers, pulls data, deserializes the
 	 * data, and emits it into the data streams.
-	 * 
+	 *
 	 * @param sourceContext The source context to emit data to.
 	 * @param subscribedPartitionsToStartOffsets The set of partitions that this subtask should handle, with their start offsets.
 	 * @param watermarksPeriodic Optional, a serialized timestamp extractor / periodic watermark generator.
 	 * @param watermarksPunctuated Optional, a serialized timestamp extractor / punctuated watermark generator.
 	 * @param runtimeContext The task's runtime context.
-	 * 
+	 *
 	 * @return The instantiated fetcher
-	 * 
+	 *
 	 * @throws Exception The method should forward exceptions
 	 */
 	protected abstract AbstractFetcher<T, ?> createFetcher(
@@ -661,11 +662,11 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	protected abstract List<KafkaTopicPartition> getKafkaPartitions(List<String> topics);
 
 	protected abstract boolean getIsAutoCommitEnabled();
-	
+
 	// ------------------------------------------------------------------------
-	//  ResultTypeQueryable methods 
+	//  ResultTypeQueryable methods
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public TypeInformation<T> getProducedType() {
 		return deserializer.getProducedType();
@@ -726,7 +727,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 
 	/**
 	 * Logs the partition information in INFO level.
-	 * 
+	 *
 	 * @param logger The logger to log to.
 	 * @param partitionInfos List of subscribed partitions
 	 */
@@ -743,11 +744,11 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 		}
 		StringBuilder sb = new StringBuilder(
 				"Consumer is going to read the following topics (with number of partitions): ");
-		
+
 		for (Map.Entry<String, Integer> e : countPerTopic.entrySet()) {
 			sb.append(e.getKey()).append(" (").append(e.getValue()).append("), ");
 		}
-		
+
 		logger.info(sb.toString());
 	}
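
For reference, the per-partition watermark behaviour described in the javadoc above can be wired up as in the following minimal sketch. The broker address, group id, topic name and the processing-time based timestamps are assumptions made for illustration and are not part of this commit.

import java.util.Properties;

import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class ConsumerWatermarkSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
		props.setProperty("group.id", "example-group");           // assumed consumer group

		FlinkKafkaConsumer09<String> consumer =
				new FlinkKafkaConsumer09<>("my-topic", new SimpleStringSchema(), props);

		// The assigner runs inside the source, once per Kafka partition, as the
		// javadoc above describes. For illustration it uses the current processing
		// time as the event time and allows one second of out-of-orderness.
		consumer.assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<String>() {

			private long maxTimestamp = Long.MIN_VALUE;

			@Override
			public long extractTimestamp(String element, long previousElementTimestamp) {
				long ts = System.currentTimeMillis();
				maxTimestamp = Math.max(maxTimestamp, ts);
				return ts;
			}

			@Override
			public Watermark getCurrentWatermark() {
				return new Watermark(maxTimestamp == Long.MIN_VALUE ? Long.MIN_VALUE : maxTimestamp - 1000);
			}
		});

		env.addSource(consumer).print();
		env.execute("Kafka consumer with per-partition watermarks (sketch)");
	}
}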
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
index 46d7d47..76a2f84 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
@@ -17,14 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.java.ClosureCleaner;
@@ -41,6 +33,7 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegat
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
 import org.apache.flink.util.NetUtils;
+
 import org.apache.kafka.clients.producer.Callback;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
@@ -53,13 +46,20 @@ import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static java.util.Objects.requireNonNull;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 
+import static java.util.Objects.requireNonNull;
 
 /**
  * Flink Sink to produce data into a Kafka topic.
  *
- * Please note that this producer provides at-least-once reliability guarantees when
+ * <p>Please note that this producer provides at-least-once reliability guarantees when
  * checkpoints are enabled and setFlushOnCheckpoint(true) is set.
  * Otherwise, the producer doesn't provide any reliability guarantees.
  *
@@ -98,7 +98,7 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 	protected final FlinkKafkaPartitioner<IN> flinkKafkaPartitioner;
 
 	/**
-	 * Partitions of each topic
+	 * Partitions of each topic.
 	 */
 	protected final Map<String, int[]> topicPartitionsMap;
 
@@ -114,16 +114,16 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 
 	// -------------------------------- Runtime fields ------------------------------------------
 
-	/** KafkaProducer instance */
+	/** KafkaProducer instance. */
 	protected transient KafkaProducer<byte[], byte[]> producer;
 
-	/** The callback than handles error propagation or logging callbacks */
+	/** The callback that handles error propagation or logging callbacks. */
 	protected transient Callback callback;
 
-	/** Errors encountered in the async producer are stored here */
+	/** Errors encountered in the async producer are stored here. */
 	protected transient volatile Exception asyncException;
 
-	/** Lock for accessing the pending records */
+	/** Lock for accessing the pending records. */
 	protected final SerializableObject pendingRecordsLock = new SerializableObject();
 
 	/** Number of unacknowledged records. */
@@ -196,9 +196,10 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 	}
 
 	/**
-	 * Used for testing only
+	 * Used for testing only.
 	 */
-	protected <K,V> KafkaProducer<K,V> getKafkaProducer(Properties props) {
+	@VisibleForTesting
+	protected <K, V> KafkaProducer<K, V> getKafkaProducer(Properties props) {
 		return new KafkaProducer<>(props);
 	}
 
@@ -213,8 +214,8 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 
 		RuntimeContext ctx = getRuntimeContext();
 
-		if(null != flinkKafkaPartitioner) {
-			if(flinkKafkaPartitioner instanceof FlinkKafkaDelegatePartitioner) {
+		if (null != flinkKafkaPartitioner) {
+			if (flinkKafkaPartitioner instanceof FlinkKafkaDelegatePartitioner) {
 				((FlinkKafkaDelegatePartitioner) flinkKafkaPartitioner).setPartitions(
 						getPartitionsByTopic(this.defaultTopicId, this.producer));
 			}
@@ -239,7 +240,7 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 			}
 		}
 
-		if (flushOnCheckpoint && !((StreamingRuntimeContext)this.getRuntimeContext()).isCheckpointingEnabled()) {
+		if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
 			LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
 			flushOnCheckpoint = false;
 		}
@@ -287,7 +288,7 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 		}
 
 		int[] partitions = this.topicPartitionsMap.get(targetTopic);
-		if(null == partitions) {
+		if (null == partitions) {
 			partitions = getPartitionsByTopic(targetTopic, producer);
 			this.topicPartitionsMap.put(targetTopic, partitions);
 		}
@@ -310,7 +311,6 @@ public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> im
 		producer.send(record, callback);
 	}
 
-
 	@Override
 	public void close() throws Exception {
 		if (producer != null) {
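
As the reworked class javadoc above points out, the producer is only at-least-once when checkpointing is enabled and flushing on checkpoint is switched on. A minimal sketch of that configuration, assuming the 0.9 producer and an illustrative broker and topic:

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class ProducerAtLeastOnceSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.enableCheckpointing(5000); // checkpoints are required for the at-least-once guarantee

		DataStream<String> stream = env.fromElements("a", "b", "c");

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address

		FlinkKafkaProducer09<String> producer =
				new FlinkKafkaProducer09<>("my-topic", new SimpleStringSchema(), props);

		// Wait for in-flight records on every checkpoint, and fail the task on
		// asynchronous send errors instead of merely logging them.
		producer.setFlushOnCheckpoint(true);
		producer.setLogFailuresOnly(false);

		stream.addSink(producer);
		env.execute("Kafka producer with at-least-once settings (sketch)");
	}
}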

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
index b88fb83..5c9a629 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
@@ -18,13 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.List;
-import java.util.Properties;
-import org.apache.avro.Schema;
-import org.apache.avro.specific.SpecificData;
-import org.apache.avro.specific.SpecificRecord;
-import org.apache.avro.specific.SpecificRecordBase;
-import org.apache.avro.util.Utf8;
 import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.AvroTypeInfo;
@@ -35,6 +28,15 @@ import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
 
+import org.apache.avro.Schema;
+import org.apache.avro.specific.SpecificData;
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.avro.specific.SpecificRecordBase;
+import org.apache.avro.util.Utf8;
+
+import java.util.List;
+import java.util.Properties;
+
 /**
  * A version-agnostic Kafka Avro {@link StreamTableSource}.
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
index 41bb329..51fd952 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
@@ -25,12 +26,12 @@ import org.apache.flink.types.Row;
 import java.util.Properties;
 
 /**
- * Base class for {@link KafkaTableSink} that serializes data in JSON format
+ * Base class for {@link KafkaTableSink} that serializes data in JSON format.
  */
 public abstract class KafkaJsonTableSink extends KafkaTableSink {
-	
+
 	/**
-	 * Creates KafkaJsonTableSink
+	 * Creates KafkaJsonTableSink.
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
index 460f948..1c8e0a0 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
@@ -19,12 +19,12 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
-import org.apache.flink.types.Row;
 
 /**
  * A version-agnostic Kafka JSON {@link StreamTableSource}.

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
index 1c38816..a94936c 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
@@ -15,15 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.types.Row;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.table.sinks.AppendStreamTableSink;
 import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+import org.apache.flink.table.sinks.AppendStreamTableSink;
+import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
 
 import java.util.Properties;
@@ -44,8 +45,8 @@ public abstract class KafkaTableSink implements AppendStreamTableSink<Row> {
 	protected TypeInformation[] fieldTypes;
 
 	/**
-	 * Creates KafkaTableSink
-	 * 
+	 * Creates KafkaTableSink.
+	 *
 	 * @param topic                 Kafka topic to write to.
 	 * @param properties            Properties for the Kafka consumer.
 	 * @param partitioner           Partitioner to select Kafka partition for each item

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
index 029aa45..8969f90 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
@@ -18,7 +18,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -27,6 +26,8 @@ import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
 
+import java.util.Properties;
+
 /**
  * A version-agnostic Kafka {@link StreamTableSource}.
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
index 8bb75b4..0642e7e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
@@ -21,7 +21,7 @@ package org.apache.flink.streaming.connectors.kafka.config;
  * The offset commit mode represents the behaviour of how offsets are externally committed
  * back to Kafka brokers / Zookeeper.
  *
- * The exact value of this is determined at runtime in the consumer subtasks.
+ * <p>The exact value of this is determined at runtime in the consumer subtasks.
  */
 public enum OffsetCommitMode {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
index 8fc2fe0..81c4138 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka.config;
 
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionStateSentinel;
@@ -23,13 +24,13 @@ import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition
  */
 public enum StartupMode {
 
-	/** Start from committed offsets in ZK / Kafka brokers of a specific consumer group (default) */
+	/** Start from committed offsets in ZK / Kafka brokers of a specific consumer group (default). */
 	GROUP_OFFSETS(KafkaTopicPartitionStateSentinel.GROUP_OFFSET),
 
-	/** Start from the earliest offset possible */
+	/** Start from the earliest offset possible. */
 	EARLIEST(KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET),
 
-	/** Start from the latest offset */
+	/** Start from the latest offset. */
 	LATEST(KafkaTopicPartitionStateSentinel.LATEST_OFFSET),
 
 	/**
@@ -39,7 +40,7 @@ public enum StartupMode {
 	 */
 	SPECIFIC_OFFSETS(Long.MIN_VALUE);
 
-	/** The sentinel offset value corresponding to this startup mode */
+	/** The sentinel offset value corresponding to this startup mode. */
 	private long stateSentinel;
 
 	StartupMode(long stateSentinel) {
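
The sentinels above back the consumer's setStartFrom*() methods. A hedged sketch of how a user selects the different startup modes follows; the topic name, partitions, offsets and connection properties are made up for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class StartupModeSketch {

	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
		props.setProperty("group.id", "example-group");           // assumed consumer group

		FlinkKafkaConsumer09<String> consumer =
				new FlinkKafkaConsumer09<>("my-topic", new SimpleStringSchema(), props);

		// GROUP_OFFSETS is the default; in practice exactly one of these calls is
		// made, since the last call wins.
		consumer.setStartFromGroupOffsets();
		consumer.setStartFromEarliest(); // EARLIEST
		consumer.setStartFromLatest();   // LATEST

		// SPECIFIC_OFFSETS: each value is the offset of the next record to read.
		Map<KafkaTopicPartition, Long> offsets = new HashMap<>();
		offsets.put(new KafkaTopicPartition("my-topic", 0), 23L);
		offsets.put(new KafkaTopicPartition("my-topic", 1), 31L);
		consumer.setStartFromSpecificOffsets(offsets);
	}
}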

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
index 0b311a9..cfd7c3b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
@@ -38,43 +38,43 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * Base class for all fetchers, which implement the connections to Kafka brokers and
  * pull records from Kafka partitions.
- * 
+ *
  * <p>This fetcher base class implements the logic around emitting records and tracking offsets,
- * as well as around the optional timestamp assignment and watermark generation. 
- * 
+ * as well as around the optional timestamp assignment and watermark generation.
+ *
  * @param <T> The type of elements deserialized from Kafka's byte records, and emitted into
  *            the Flink data streams.
  * @param <KPH> The type of topic/partition identifier used by Kafka in the specific version.
  */
 public abstract class AbstractFetcher<T, KPH> {
-	
+
 	protected static final int NO_TIMESTAMPS_WATERMARKS = 0;
 	protected static final int PERIODIC_WATERMARKS = 1;
 	protected static final int PUNCTUATED_WATERMARKS = 2;
-	
+
 	// ------------------------------------------------------------------------
-	
-	/** The source context to emit records and watermarks to */
+
+	/** The source context to emit records and watermarks to. */
 	protected final SourceContext<T> sourceContext;
 
 	/** The lock that guarantees that record emission and state updates are atomic,
-	 * from the view of taking a checkpoint */
+	 * from the view of taking a checkpoint. */
 	protected final Object checkpointLock;
 
-	/** All partitions (and their state) that this fetcher is subscribed to */
+	/** All partitions (and their state) that this fetcher is subscribed to. */
 	private final KafkaTopicPartitionState<KPH>[] subscribedPartitionStates;
 
-	/** The mode describing whether the fetcher also generates timestamps and watermarks */
+	/** The mode describing whether the fetcher also generates timestamps and watermarks. */
 	protected final int timestampWatermarkMode;
 
-	/** Flag whether to register metrics for the fetcher */
+	/** Flag whether to register metrics for the fetcher. */
 	protected final boolean useMetrics;
 
-	/** Only relevant for punctuated watermarks: The current cross partition watermark */
+	/** Only relevant for punctuated watermarks: The current cross partition watermark. */
 	private volatile long maxWatermarkSoFar = Long.MIN_VALUE;
 
 	// ------------------------------------------------------------------------
-	
+
 	protected AbstractFetcher(
 			SourceContext<T> sourceContext,
 			Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
@@ -83,14 +83,13 @@ public abstract class AbstractFetcher<T, KPH> {
 			ProcessingTimeService processingTimeProvider,
 			long autoWatermarkInterval,
 			ClassLoader userCodeClassLoader,
-			boolean useMetrics) throws Exception
-	{
+			boolean useMetrics) throws Exception {
 		this.sourceContext = checkNotNull(sourceContext);
 		this.checkpointLock = sourceContext.getCheckpointLock();
 		this.useMetrics = useMetrics;
-		
+
 		// figure out what watermark mode we will be using
-		
+
 		if (watermarksPeriodic == null) {
 			if (watermarksPunctuated == null) {
 				// simple case, no watermarks involved
@@ -106,7 +105,7 @@ public abstract class AbstractFetcher<T, KPH> {
 			}
 		}
 
-		// create our partition state according to the timestamp/watermark mode 
+		// create our partition state according to the timestamp/watermark mode
 		this.subscribedPartitionStates = initializeSubscribedPartitionStates(
 				assignedPartitionsWithInitialOffsets,
 				timestampWatermarkMode,
@@ -119,13 +118,13 @@ public abstract class AbstractFetcher<T, KPH> {
 				throw new IllegalArgumentException("The fetcher was assigned partitions with undefined initial offsets.");
 			}
 		}
-		
+
 		// if we have periodic watermarks, kick off the interval scheduler
 		if (timestampWatermarkMode == PERIODIC_WATERMARKS) {
-			KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?>[] parts = 
+			KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?>[] parts =
 					(KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?>[]) subscribedPartitionStates;
-			
-			PeriodicWatermarkEmitter periodicEmitter = 
+
+			PeriodicWatermarkEmitter periodicEmitter =
 					new PeriodicWatermarkEmitter(parts, sourceContext, processingTimeProvider, autoWatermarkInterval);
 			periodicEmitter.start();
 		}
@@ -149,17 +148,17 @@ public abstract class AbstractFetcher<T, KPH> {
 	// ------------------------------------------------------------------------
 
 	public abstract void runFetchLoop() throws Exception;
-	
+
 	public abstract void cancel();
 
 	// ------------------------------------------------------------------------
 	//  Kafka version specifics
 	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * Creates the Kafka version specific representation of the given
 	 * topic partition.
-	 * 
+	 *
 	 * @param partition The Flink representation of the Kafka topic partition.
 	 * @return The specific Kafka representation of the Kafka topic partition.
 	 */
@@ -170,7 +169,7 @@ public abstract class AbstractFetcher<T, KPH> {
 	 * older Kafka versions). This method is only ever called when the offset commit mode of
 	 * the consumer is {@link OffsetCommitMode#ON_CHECKPOINTS}.
 	 *
-	 * The given offsets are the internal checkpointed offsets, representing
+	 * <p>The given offsets are the internal checkpointed offsets, representing
 	 * the last processed record of each partition. Version-specific implementations of this method
 	 * need to hold the contract that the given offsets must be incremented by 1 before
 	 * committing them, so that committed offsets to Kafka represent "the next record to process".
@@ -179,16 +178,16 @@ public abstract class AbstractFetcher<T, KPH> {
 	 * @throws Exception This method forwards exceptions.
 	 */
 	public abstract void commitInternalOffsetsToKafka(Map<KafkaTopicPartition, Long> offsets) throws Exception;
-	
+
 	// ------------------------------------------------------------------------
 	//  snapshot and restore the state
 	// ------------------------------------------------------------------------
 
 	/**
 	 * Takes a snapshot of the partition offsets.
-	 * 
+	 *
 	 * <p>Important: This method must be called under the checkpoint lock.
-	 * 
+	 *
 	 * @return A map from partition to current offset.
 	 */
 	public HashMap<KafkaTopicPartition, Long> snapshotCurrentState() {
@@ -208,10 +207,10 @@ public abstract class AbstractFetcher<T, KPH> {
 
 	/**
 	 * Emits a record without attaching an existing timestamp to it.
-	 * 
+	 *
 	 * <p>Implementation Note: This method is kept brief to be JIT inlining friendly.
 	 * That makes the fast path efficient, the extended paths are called as separate methods.
-	 * 
+	 *
 	 * @param record The record to emit
 	 * @param partitionState The state of the Kafka partition from which the record was fetched
 	 * @param offset The offset of the record
@@ -282,8 +281,7 @@ public abstract class AbstractFetcher<T, KPH> {
 	 * also a periodic watermark generator.
 	 */
 	protected void emitRecordWithTimestampAndPeriodicWatermark(
-			T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp)
-	{
+			T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp) {
 		@SuppressWarnings("unchecked")
 		final KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> withWatermarksState =
 				(KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH>) partitionState;
@@ -298,7 +296,7 @@ public abstract class AbstractFetcher<T, KPH> {
 		}
 
 		// emit the record with timestamp, using the usual checkpoint lock to guarantee
-		// atomicity of record emission and offset state update 
+		// atomicity of record emission and offset state update
 		synchronized (checkpointLock) {
 			sourceContext.collectWithTimestamp(record, timestamp);
 			partitionState.setOffset(offset);
@@ -310,8 +308,7 @@ public abstract class AbstractFetcher<T, KPH> {
 	 * also a punctuated watermark generator.
 	 */
 	protected void emitRecordWithTimestampAndPunctuatedWatermark(
-			T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp)
-	{
+			T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp) {
 		@SuppressWarnings("unchecked")
 		final KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> withWatermarksState =
 				(KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH>) partitionState;
@@ -322,7 +319,7 @@ public abstract class AbstractFetcher<T, KPH> {
 		final Watermark newWatermark = withWatermarksState.checkAndGetNewWatermark(record, timestamp);
 
 		// emit the record with timestamp, using the usual checkpoint lock to guarantee
-		// atomicity of record emission and offset state update 
+		// atomicity of record emission and offset state update
 		synchronized (checkpointLock) {
 			sourceContext.collectWithTimestamp(record, timestamp);
 			partitionState.setOffset(offset);
@@ -346,7 +343,7 @@ public abstract class AbstractFetcher<T, KPH> {
 				@SuppressWarnings("unchecked")
 				final KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> withWatermarksState =
 						(KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH>) state;
-				
+
 				newMin = Math.min(newMin, withWatermarksState.getCurrentPartitionWatermark());
 			}
 
@@ -375,11 +372,9 @@ public abstract class AbstractFetcher<T, KPH> {
 			int timestampWatermarkMode,
 			SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
 			SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
-			ClassLoader userCodeClassLoader)
-		throws IOException, ClassNotFoundException
-	{
+			ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {
 		switch (timestampWatermarkMode) {
-			
+
 			case NO_TIMESTAMPS_WATERMARKS: {
 				@SuppressWarnings("unchecked")
 				KafkaTopicPartitionState<KPH>[] partitions =
@@ -410,7 +405,7 @@ public abstract class AbstractFetcher<T, KPH> {
 
 					AssignerWithPeriodicWatermarks<T> assignerInstance =
 							watermarksPeriodic.deserializeValue(userCodeClassLoader);
-					
+
 					partitions[pos] = new KafkaTopicPartitionStateWithPeriodicWatermarks<>(
 							partition.getKey(), kafkaHandle, assignerInstance);
 					partitions[pos].setOffset(partition.getValue());
@@ -452,7 +447,7 @@ public abstract class AbstractFetcher<T, KPH> {
 	// ------------------------- Metrics ----------------------------------
 
 	/**
-	 * Add current and committed offsets to metric group
+	 * Add current and committed offsets to metric group.
 	 *
 	 * @param metricGroup The metric group to use
 	 */
@@ -467,7 +462,7 @@ public abstract class AbstractFetcher<T, KPH> {
 	}
 
 	/**
-	 * Gauge types
+	 * Gauge types.
 	 */
 	private enum OffsetGaugeType {
 		CURRENT_OFFSET,
@@ -500,7 +495,7 @@ public abstract class AbstractFetcher<T, KPH> {
 		}
 	}
  	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * The periodic watermark emitter. In its given interval, it checks all partitions for
 	 * the current event time watermark, and possibly emits the next watermark.
@@ -508,23 +503,22 @@ public abstract class AbstractFetcher<T, KPH> {
 	private static class PeriodicWatermarkEmitter implements ProcessingTimeCallback {
 
 		private final KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?>[] allPartitions;
-		
+
 		private final SourceContext<?> emitter;
-		
+
 		private final ProcessingTimeService timerService;
 
 		private final long interval;
-		
+
 		private long lastWatermarkTimestamp;
-		
+
 		//-------------------------------------------------
 
 		PeriodicWatermarkEmitter(
 				KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?>[] allPartitions,
 				SourceContext<?> emitter,
 				ProcessingTimeService timerService,
-				long autoWatermarkInterval)
-		{
+				long autoWatermarkInterval) {
 			this.allPartitions = checkNotNull(allPartitions);
 			this.emitter = checkNotNull(emitter);
 			this.timerService = checkNotNull(timerService);
@@ -533,17 +527,17 @@ public abstract class AbstractFetcher<T, KPH> {
 		}
 
 		//-------------------------------------------------
-		
+
 		public void start() {
 			timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
 		}
-		
+
 		@Override
 		public void onProcessingTime(long timestamp) throws Exception {
 
 			long minAcrossAll = Long.MAX_VALUE;
 			for (KafkaTopicPartitionStateWithPeriodicWatermarks<?, ?> state : allPartitions) {
-				
+
 				// we access the current watermark for the periodic assigners under the state
 				// lock, to prevent concurrent modification to any internal variables
 				final long curr;
@@ -551,16 +545,16 @@ public abstract class AbstractFetcher<T, KPH> {
 				synchronized (state) {
 					curr = state.getCurrentWatermarkTimestamp();
 				}
-				
+
 				minAcrossAll = Math.min(minAcrossAll, curr);
 			}
-			
+
 			// emit next watermark, if there is one
 			if (minAcrossAll > lastWatermarkTimestamp) {
 				lastWatermarkTimestamp = minAcrossAll;
 				emitter.emitWatermark(new Watermark(minAcrossAll));
 			}
-			
+
 			// schedule the next watermark
 			timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
 		}
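
The fetcher above only forwards a punctuated watermark once the minimum watermark across all of its subscribed partitions has advanced. A hedged sketch of an AssignerWithPunctuatedWatermarks that such a fetcher would drive; the "<timestamp>,<payload>" record format and the FLUSH marker are assumptions for illustration.

import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;

public class PunctuatedAssignerSketch implements AssignerWithPunctuatedWatermarks<String> {

	@Override
	public long extractTimestamp(String element, long previousElementTimestamp) {
		// Assumed record format: "<timestamp>,<payload>".
		return Long.parseLong(element.split(",", 2)[0]);
	}

	@Override
	public Watermark checkAndGetNewWatermark(String lastElement, long extractedTimestamp) {
		// Returning null means "no watermark for this element"; the fetcher only
		// emits a watermark downstream once the minimum across its partitions advances.
		return lastElement.endsWith(",FLUSH") ? new Watermark(extractedTimestamp) : null;
	}
}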


[18/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-filesystem

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-filesystem


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7292c874
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7292c874
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7292c874

Branch: refs/heads/master
Commit: 7292c8743d981d61b0f860367e0266b307e1362f
Parents: fab8fe5
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:57:18 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:29 2017 +0200

----------------------------------------------------------------------
 .../flink-connector-filesystem/pom.xml          |   3 +-
 .../connectors/fs/AvroKeyValueSinkWriter.java   |  60 +++++----
 .../flink/streaming/connectors/fs/Bucketer.java |   5 +-
 .../flink/streaming/connectors/fs/Clock.java    |   8 +-
 .../connectors/fs/DateTimeBucketer.java         |  18 +--
 .../connectors/fs/NonRollingBucketer.java       |   2 +
 .../streaming/connectors/fs/RollingSink.java    | 109 ++++++++--------
 .../connectors/fs/SequenceFileWriter.java       |   5 +-
 .../connectors/fs/StreamWriterBase.java         |  21 +--
 .../streaming/connectors/fs/StringWriter.java   |   1 +
 .../streaming/connectors/fs/SystemClock.java    |   2 +-
 .../flink/streaming/connectors/fs/Writer.java   |   4 +-
 .../fs/bucketing/BasePathBucketer.java          |   2 +
 .../connectors/fs/bucketing/Bucketer.java       |   6 +-
 .../connectors/fs/bucketing/BucketingSink.java  |  93 ++++++-------
 .../fs/bucketing/DateTimeBucketer.java          |  16 ++-
 .../fs/RollingSinkFaultToleranceITCase.java     |  15 +--
 .../connectors/fs/RollingSinkITCase.java        | 130 ++++++++-----------
 .../connectors/fs/RollingSinkSecuredITCase.java |  20 +--
 .../BucketingSinkFaultToleranceITCase.java      |  17 ++-
 .../BucketingSinkFrom12MigrationTest.java       |  17 ++-
 .../fs/bucketing/BucketingSinkTest.java         |  39 +++---
 .../fs/bucketing/RollingSinkMigrationTest.java  |   7 +-
 .../RollingToBucketingMigrationTest.java        |   7 +-
 .../src/test/resources/log4j-test.properties    |   2 +-
 .../src/test/resources/log4j-test.properties    |   2 +-
 26 files changed, 315 insertions(+), 296 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/pom.xml b/flink-connectors/flink-connector-filesystem/pom.xml
index 07b0ae1..f39758b 100644
--- a/flink-connectors/flink-connector-filesystem/pom.xml
+++ b/flink-connectors/flink-connector-filesystem/pom.xml
@@ -65,7 +65,7 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
-		
+
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
@@ -104,7 +104,6 @@ under the License.
 			<type>test-jar</type>
 		</dependency>
 
-
 		<dependency>
 			<groupId>org.apache.hadoop</groupId>
 			<artifactId>hadoop-hdfs</artifactId>

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/AvroKeyValueSinkWriter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/AvroKeyValueSinkWriter.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/AvroKeyValueSinkWriter.java
index 3e3c86b..45e73fe 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/AvroKeyValueSinkWriter.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/AvroKeyValueSinkWriter.java
@@ -18,10 +18,11 @@ package org.apache.flink.streaming.connectors.fs;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Map;
+import org.apache.flink.api.common.ExecutionConfig;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.api.java.typeutils.InputTypeConfigurable;
+import org.apache.flink.api.java.typeutils.TupleTypeInfoBase;
 
 import org.apache.avro.Schema;
 import org.apache.avro.file.CodecFactory;
@@ -31,15 +32,15 @@ import org.apache.avro.generic.GenericData;
 import org.apache.avro.generic.GenericDatumWriter;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.io.DatumWriter;
-import org.apache.flink.api.common.ExecutionConfig;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.typeutils.InputTypeConfigurable;
-import org.apache.flink.api.java.typeutils.TupleTypeInfoBase;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Map;
+
 /**
 * Implementation of AvroKeyValue writer that can be used in Sink.
 * Each entry would be wrapped in GenericRecord with key/value fields(same as in m/r lib)
@@ -49,7 +50,7 @@ Usage:
 		BucketingSink<Tuple2<Long, Long>> sink = new BucketingSink<Tuple2<Long, Long>>("/tmp/path");
 		sink.setBucketer(new DateTimeBucketer<Tuple2<Long, Long>>("yyyy-MM-dd/HH/mm/"));
 		sink.setPendingSuffix(".avro");
-		Map<String,String> properties = new HashMap<>();
+		Map<String, String> properties = new HashMap<>();
 		Schema longSchema = Schema.create(Type.LONG);
 		String keySchema = longSchema.toString();
 		String valueSchema = longSchema.toString();
@@ -57,7 +58,7 @@ Usage:
 		properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_VALUE_SCHEMA, valueSchema);
 		properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS, Boolean.toString(true));
 		properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS_CODEC, DataFileConstants.SNAPPY_CODEC);
-		
+
 		sink.setWriter(new AvroSinkWriter<Long, Long>(properties));
 		sink.setBatchSize(1024 * 1024 * 64); // this is 64 MB,
 }
@@ -77,37 +78,37 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 	private final Map<String, String> properties;
 
 	/**
-	 * C'tor for the writer
-	 * <p>
-	 * You can provide different properties that will be used to configure avro key-value writer as simple properties map(see example above)
+	 * C'tor for the writer.
+	 *
+	 * <p>You can provide different properties that will be used to configure the Avro key-value writer as a simple properties map (see the example above).
 	 * @param properties
 	 */
 	@SuppressWarnings("deprecation")
 	public AvroKeyValueSinkWriter(Map<String, String> properties) {
 		this.properties = properties;
-		
+
 		String keySchemaString = properties.get(CONF_OUTPUT_KEY_SCHEMA);
 		if (keySchemaString == null) {
 			throw new IllegalStateException("No key schema provided, set '" + CONF_OUTPUT_KEY_SCHEMA + "' property");
 		}
-		Schema.parse(keySchemaString);//verifying that schema valid
-		
+		Schema.parse(keySchemaString); //verifying that schema valid
+
 		String valueSchemaString = properties.get(CONF_OUTPUT_VALUE_SCHEMA);
 		if (valueSchemaString == null) {
 			throw new IllegalStateException("No value schema provided, set '" + CONF_OUTPUT_VALUE_SCHEMA + "' property");
 		}
-		Schema.parse(valueSchemaString);//verifying that schema valid
+		Schema.parse(valueSchemaString); //verifying that schema valid
 	}
 
-	private boolean getBoolean(Map<String,String> conf, String key, boolean def) {
+	private boolean getBoolean(Map<String, String> conf, String key, boolean def) {
 		String value = conf.get(key);
 		if (value == null) {
 			return def;
 		}
 		return Boolean.parseBoolean(value);
 	}
-	
-	private int getInt(Map<String,String> conf, String key, int def) {
+
+	private int getInt(Map<String, String> conf, String key, int def) {
 		String value = conf.get(key);
 		if (value == null) {
 			return def;
@@ -116,7 +117,7 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 	}
 
 	//this derived from AvroOutputFormatBase.getCompressionCodec(..)
-	private CodecFactory getCompressionCodec(Map<String,String> conf) {
+	private CodecFactory getCompressionCodec(Map<String, String> conf) {
 		if (getBoolean(conf, CONF_COMPRESS, false)) {
 			int deflateLevel = getInt(conf, CONF_DEFLATE_LEVEL, CodecFactory.DEFAULT_DEFLATE_LEVEL);
 			int xzLevel = getInt(conf, CONF_XZ_LEVEL, CodecFactory.DEFAULT_XZ_LEVEL);
@@ -147,12 +148,12 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 
 	@Override
 	public void close() throws IOException {
-		super.close();//the order is important since super.close flushes inside
+		super.close(); //the order is important since super.close flushes inside
 		if (keyValueWriter != null) {
 			keyValueWriter.close();
 		}
 	}
-	
+
 	@Override
 	public long flush() throws IOException {
 		if (keyValueWriter != null) {
@@ -184,7 +185,7 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 	public Writer<Tuple2<K, V>> duplicate() {
 		return new AvroKeyValueSinkWriter<K, V>(properties);
 	}
-	
+
 	// taken from m/r avro lib to remove dependency on it
 	private static final class AvroKeyValueWriter<K, V> {
 		/** A writer for the Avro container file. */
@@ -245,7 +246,12 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 		}
 	}
 
-	// taken from AvroKeyValue avro-mapr lib
+	/**
+	 * A reusable Avro generic record for writing key/value pairs to the
+	 * file.
+	 *
+	 * <p>taken from AvroKeyValue avro-mapr lib
+	 */
 	public static class AvroKeyValue<K, V> {
 		/** The name of the key value pair generic record. */
 		public static final String KEY_VALUE_PAIR_RECORD_NAME = "KeyValuePair";
@@ -293,7 +299,7 @@ public class AvroKeyValueSinkWriter<K, V> extends StreamWriterBase<Tuple2<K, V>>
 
 		/**
 		 * Creates a KeyValuePair generic record schema.
-		 * 
+		 *
 		 * @return A schema for a generic record with two fields: 'key' and
 		 *         'value'.
 		 */
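
The usage snippet embedded in the class javadoc above is illustrative rather than compilable (for instance, it refers to an AvroSinkWriter name while the class in this file is AvroKeyValueSinkWriter). The sketch below is a hedged, self-contained variant of the same configuration; the output path, the AvroSinkExample class and its attachSink helper are made up for illustration.

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileConstants;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.fs.AvroKeyValueSinkWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

import java.util.HashMap;
import java.util.Map;

public class AvroSinkExample {

	public static void attachSink(DataStream<Tuple2<Long, Long>> stream) {
		// schema used for both the 'key' and the 'value' field of the generated records
		String longSchema = Schema.create(Schema.Type.LONG).toString();

		Map<String, String> properties = new HashMap<>();
		properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_KEY_SCHEMA, longSchema);
		properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_VALUE_SCHEMA, longSchema);
		properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS, Boolean.toString(true));
		properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS_CODEC, DataFileConstants.SNAPPY_CODEC);

		BucketingSink<Tuple2<Long, Long>> sink = new BucketingSink<>("/tmp/avro-out");
		sink.setBucketer(new DateTimeBucketer<Tuple2<Long, Long>>("yyyy-MM-dd--HH"));
		sink.setPendingSuffix(".avro");
		sink.setWriter(new AvroKeyValueSinkWriter<Long, Long>(properties));
		sink.setBatchSize(1024 * 1024 * 64); // roll part files at 64 MB

		stream.addSink(sink);
	}
}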

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Bucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Bucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Bucketer.java
index 24ad6ab..9caf628 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Bucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Bucketer.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.hadoop.fs.Path;
@@ -25,8 +26,8 @@ import java.io.Serializable;
  * A bucketer is used with a {@link RollingSink}
  * to put emitted elements into rolling files.
  *
- * <p>
- * The {@code RollingSink} has one active bucket that it is writing to at a time. Whenever
+ *
+ * <p>The {@code RollingSink} has one active bucket that it is writing to at a time. Whenever
  * a new element arrives it will ask the {@code Bucketer} if a new bucket should be started and
  * the old one closed. The {@code Bucketer} can, for example, decide to start new buckets
  * based on system time.

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Clock.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Clock.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Clock.java
index 174707c..eb864c2 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Clock.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Clock.java
@@ -15,19 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.fs;
 
+package org.apache.flink.streaming.connectors.fs;
 
 /**
  * A clock that can provide the current time.
  *
- * <p>
- * Normally this would be system time, but for testing a custom {@code Clock} can be provided.
+ *
+ * <p>Normally this would be system time, but for testing a custom {@code Clock} can be provided.
  */
 public interface Clock {
 
 	/**
 	 * Return the current system time in milliseconds.
 	 */
-	public long currentTimeMillis();
+	long currentTimeMillis();
 }
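
Since the javadoc above points out that a custom Clock is mainly useful for tests, a trivial settable implementation might look like the following sketch; the ManualClock name is made up and not part of the connector.

import org.apache.flink.streaming.connectors.fs.Clock;

/** A manually advanced Clock for tests. */
public class ManualClock implements Clock {

	private volatile long now;

	public ManualClock(long startTime) {
		this.now = startTime;
	}

	/** Moves the clock forward by the given number of milliseconds. */
	public void advance(long millis) {
		now += millis;
	}

	@Override
	public long currentTimeMillis() {
		return now;
	}
}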

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/DateTimeBucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/DateTimeBucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/DateTimeBucketer.java
index 0df8998..72b4823 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/DateTimeBucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/DateTimeBucketer.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.hadoop.fs.Path;
@@ -29,27 +30,27 @@ import java.util.Date;
 /**
  * A {@link Bucketer} that assigns to buckets based on current system time.
  *
- * <p>
- * The {@code DateTimeBucketer} will create directories of the following form:
+ *
+ * <p>The {@code DateTimeBucketer} will create directories of the following form:
  * {@code /{basePath}/{dateTimePath}/}. The {@code basePath} is the path
  * that was specified as a base path when creating the
  * {@link RollingSink}. The {@code dateTimePath}
  * is determined based on the current system time and the user provided format string.
  *
- * <p>
- * {@link SimpleDateFormat} is used to derive a date string from the current system time and
+ *
+ * <p>{@link SimpleDateFormat} is used to derive a date string from the current system time and
  * the date format string. The default format string is {@code "yyyy-MM-dd--HH"} so the rolling
  * files will have a granularity of hours.
  *
  *
- * <p>
- * Example:
+ *
+ * <p>Example:
  *
  * <pre>{@code
  *     Bucketer buck = new DateTimeBucketer("yyyy-MM-dd--HH");
  * }</pre>
  *
- * This will create for example the following bucket path:
+ * <p>This will create for example the following bucket path:
  * {@code /base/1976-12-31-14/}
  *
  * @deprecated use {@link org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer} instead.
@@ -57,7 +58,7 @@ import java.util.Date;
 @Deprecated
 public class DateTimeBucketer implements Bucketer {
 
-	private static Logger LOG = LoggerFactory.getLogger(DateTimeBucketer.class);
+	private static final Logger LOG = LoggerFactory.getLogger(DateTimeBucketer.class);
 
 	private static final long serialVersionUID = 1L;
 
@@ -95,7 +96,6 @@ public class DateTimeBucketer implements Bucketer {
 		this.dateFormatter = new SimpleDateFormat(formatString);
 	}
 
-
 	@Override
 	public boolean shouldStartNewBucket(Path basePath, Path currentBucketPath) {
 		String newDateTimeString = dateFormatter.format(new Date(clock.currentTimeMillis()));

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/NonRollingBucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/NonRollingBucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/NonRollingBucketer.java
index 6854596..a03bcb5 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/NonRollingBucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/NonRollingBucketer.java
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.flink.streaming.connectors.fs.bucketing.BasePathBucketer;
+
 import org.apache.hadoop.fs.Path;
 
 /**

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/RollingSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/RollingSink.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/RollingSink.java
index 429d00a..3d3ea05 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/RollingSink.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/RollingSink.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
-import org.apache.commons.lang3.time.StopWatch;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.OperatorStateStore;
@@ -33,6 +33,8 @@ import org.apache.flink.streaming.api.checkpoint.CheckpointedRestoring;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
 import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
 import org.apache.flink.util.Preconditions;
+
+import org.apache.commons.lang3.time.StopWatch;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,13 +58,13 @@ import java.util.UUID;
  * Sink that emits its input elements to rolling {@link org.apache.hadoop.fs.FileSystem} files. This
  * is integrated with the checkpointing mechanism to provide exactly once semantics.
  *
- * <p>
- * When creating the sink a {@code basePath} must be specified. The base directory contains
+ *
+ * <p>When creating the sink a {@code basePath} must be specified. The base directory contains
  * one directory for every bucket. The bucket directories themselves contain several part files.
  * These contain the actual written data.
  *
- * <p>
- * The sink uses a {@link Bucketer} to determine the name of bucket directories inside the
+ *
+ * <p>The sink uses a {@link Bucketer} to determine the name of bucket directories inside the
  * base directory. Whenever the {@code Bucketer} returns a different directory name than
  * it returned before the sink will close the current part files inside that bucket
  * and start the new bucket directory. The default bucketer is a {@link DateTimeBucketer} with
@@ -71,8 +73,8 @@ import java.util.UUID;
  * {@link NonRollingBucketer} if you don't want to have
  * buckets but still write part files in a fault-tolerant way.
  *
- * <p>
- * The filenames of the part files contain the part prefix, the parallel subtask index of the sink
+ *
+ * <p>The filenames of the part files contain the part prefix, the parallel subtask index of the sink
  * and a rolling counter, for example {@code "part-1-17"}. Per default the part prefix is
  * {@code "part"} but this can be
  * configured using {@link #setPartPrefix(String)}. When a part file becomes bigger
@@ -80,8 +82,8 @@ import java.util.UUID;
  * a new part file is created. The batch size defaults to {@code 384MB}, this can be configured
  * using {@link #setBatchSize(long)}.
  *
- * <p>
- * Part files can be in one of three states: in-progress, pending or finished. The reason for this
+ *
+ * <p>Part files can be in one of three states: in-progress, pending or finished. The reason for this
  * is how the sink works together with the checkpointing mechanism to provide exactly-once semantics
  * and fault-tolerance. The part file that is currently being written to is in-progress. Once
  * a part file is closed for writing it becomes pending. When a checkpoint is successful the
@@ -95,21 +97,21 @@ import java.util.UUID;
  * the different file states and valid-length files can be configured, for example with
  * {@link #setPendingSuffix(String)}.
  *
- * <p>
- * Note: If checkpointing is not enabled the pending files will never be moved to the finished state.
+ *
+ * <p>Note: If checkpointing is not enabled the pending files will never be moved to the finished state.
  * In that case, the pending suffix/prefix can be set to {@code ""} to make the sink work
  * in a non-fault-tolerant way but still provide output without prefixes and suffixes.
  *
- * <p>
- * The part files are written using an instance of {@link Writer}. By default
+ *
+ * <p>The part files are written using an instance of {@link Writer}. By default
  * {@link org.apache.flink.streaming.connectors.fs.StringWriter} is used, which writes the result
  * of {@code toString()} for every element. Separated by newlines. You can configure the writer
  * using {@link #setWriter(Writer)}. For example,
  * {@link org.apache.flink.streaming.connectors.fs.SequenceFileWriter} can be used to write
  * Hadoop {@code SequenceFiles}.
  *
- * <p>
- * Example:
+ *
+ * <p>Example:
  *
  * <pre>{@code
  *     new RollingSink<Tuple2<IntWritable, Text>>(outPath)
@@ -117,7 +119,7 @@ import java.util.UUID;
  *         .setBucketer(new DateTimeBucketer("yyyy-MM-dd--HHmm")
  * }</pre>
  *
- * This will create a sink that writes to {@code SequenceFiles} and rolls every minute.
+ * <p>This will create a sink that writes to {@code SequenceFiles} and rolls every minute.
  *
  * @see DateTimeBucketer
  * @see StringWriter
@@ -134,8 +136,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 
 	private static final long serialVersionUID = 1L;
 
-	private static Logger LOG = LoggerFactory.getLogger(RollingSink.class);
-
+	private static final Logger LOG = LoggerFactory.getLogger(RollingSink.class);
 
 	// --------------------------------------------------------------------------------------------
 	//  User configuration values
@@ -145,53 +146,52 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * The default maximum size of part files (currently {@code 384 MB}).
 	 */
-	private final long DEFAULT_BATCH_SIZE = 1024L * 1024L * 384L;
+	private static final long DEFAULT_BATCH_SIZE = 1024L * 1024L * 384L;
 
 	/**
 	 * This is used for part files that we are writing to but which where not yet confirmed
 	 * by a checkpoint.
 	 */
-	private final String DEFAULT_IN_PROGRESS_SUFFIX = ".in-progress";
+	private static final String DEFAULT_IN_PROGRESS_SUFFIX = ".in-progress";
 
 	/**
-	 * See above, but for prefix
+	 * See above, but for prefix.
 	 */
-	private final String DEFAULT_IN_PROGRESS_PREFIX = "_";
+	private static final String DEFAULT_IN_PROGRESS_PREFIX = "_";
 
 	/**
 	 * This is used for part files that we are not writing to but which are not yet confirmed by
 	 * checkpoint.
 	 */
-	private final String DEFAULT_PENDING_SUFFIX = ".pending";
+	private static final String DEFAULT_PENDING_SUFFIX = ".pending";
 
 	/**
 	 * See above, but for prefix.
 	 */
-	private final String DEFAULT_PENDING_PREFIX = "_";
+	private static final String DEFAULT_PENDING_PREFIX = "_";
 
 	/**
 	 * When truncate() is not supported on the used FileSystem we instead write a
 	 * file along the part file with this ending that contains the length up to which
 	 * the part file is valid.
 	 */
-	private final String DEFAULT_VALID_SUFFIX = ".valid-length";
+	private static final String DEFAULT_VALID_SUFFIX = ".valid-length";
 
 	/**
 	 * See above, but for prefix.
 	 */
-	private final String DEFAULT_VALID_PREFIX = "_";
+	private static final String DEFAULT_VALID_PREFIX = "_";
 
 	/**
 	 * The default prefix for part files.
 	 */
-	private final String DEFAULT_PART_REFIX = "part";
+	private static final String DEFAULT_PART_REFIX = "part";
 
 	/**
 	 * The default timeout for asynchronous operations such as recoverLease and truncate. In
 	 * milliseconds.
 	 */
-	private final long DEFAULT_ASYNC_TIMEOUT_MS = 60 * 1000;
-
+	private static final long DEFAULT_ASYNC_TIMEOUT_MS = 60 * 1000;
 
 	/**
 	 * The base {@code Path} that stores all bucket directories.
@@ -228,7 +228,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	private String pendingPrefix = DEFAULT_PENDING_PREFIX;
 
 	private String validLengthSuffix = DEFAULT_VALID_SUFFIX;
-	private String validLengthPrefix= DEFAULT_VALID_PREFIX;
+	private String validLengthPrefix = DEFAULT_VALID_PREFIX;
 
 	private String partPrefix = DEFAULT_PART_REFIX;
 
@@ -242,7 +242,6 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	//  Internal fields (not configurable by user)
 	// --------------------------------------------------------------------------------------------
 
-
 	/**
 	 * The part file that we are currently writing to.
 	 */
@@ -266,7 +265,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 
 	/**
 	 * We use reflection to get the .truncate() method, this is only available starting with
-	 * Hadoop 2.7
+	 * Hadoop 2.7.
 	 */
 	private transient Method refTruncate;
 
@@ -290,8 +289,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Creates a new {@code RollingSink} that writes files to the given base directory.
 	 *
-	 * <p>
-	 * This uses a{@link DateTimeBucketer} as bucketer and a {@link StringWriter} has writer.
+	 *
+	 * <p>This uses a {@link DateTimeBucketer} as bucketer and a {@link StringWriter} as writer.
 	 * The maximum bucket size is set to 384 MB.
 	 *
 	 * @param basePath The directory to which to write the bucket files.
@@ -319,7 +318,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	 */
 	public RollingSink<T> setFSConfig(org.apache.hadoop.conf.Configuration config) {
 		this.fsConfig = new Configuration();
-		for(Map.Entry<String, String> entry : config) {
+		for (Map.Entry<String, String> entry : config) {
 			fsConfig.setString(entry.getKey(), entry.getValue());
 		}
 		return this;
@@ -380,7 +379,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	}
 
 	/**
-	 * Create a file system with the user-defined hdfs config
+	 * Create a file system with the user-defined hdfs config.
 	 * @throws IOException
 	 */
 	private void initFileSystem() throws IOException {
@@ -415,8 +414,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Determines whether we should change the bucket file we are writing to.
 	 *
-	 * <p>
-	 * This will roll if no file was created yet, if the file size is larger than the specified size
+	 *
+	 * <p>This will roll if no file was created yet, if the file size is larger than the specified size
 	 * or if the {@code Bucketer} determines that we should roll.
 	 */
 	private boolean shouldRoll() throws IOException {
@@ -449,8 +448,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Opens a new part file.
 	 *
-	 * <p>
-	 * This closes the old bucket file and retrieves a new bucket path from the {@code Bucketer}.
+	 *
+	 * <p>This closes the old bucket file and retrieves a new bucket path from the {@code Bucketer}.
 	 */
 	private void openNewPartFile() throws Exception {
 		closeCurrentPartFile();
@@ -505,8 +504,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Closes the current part file.
 	 *
-	 * <p>
-	 * This moves the current in-progress part file to a pending file and adds it to the list
+	 *
+	 * <p>This moves the current in-progress part file to a pending file and adds it to the list
 	 * of pending files in our bucket state.
 	 */
 	private void closeCurrentPartFile() throws Exception {
@@ -526,8 +525,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 
 	/**
 	 * Gets the truncate() call using reflection.
-	 * <p>
-	 * <b>NOTE: </b>This code comes from Flume
+	 *
+	 * <p><b>NOTE: </b>This code comes from Flume
 	 */
 	private Method reflectTruncate(FileSystem fs) {
 		Method m = null;
@@ -604,7 +603,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 			"The " + getClass().getSimpleName() + " has not been properly initialized.");
 
 		int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
-		
+
 		if (isWriterOpen) {
 			bucketState.currentFile = currentPartPath.toString();
 			bucketState.currentFileValidLength = writer.flush();
@@ -668,11 +667,11 @@ public class RollingSink<T> extends RichSinkFunction<T>
 						DistributedFileSystem dfs = (DistributedFileSystem) fs;
 						LOG.debug("Trying to recover file lease {}", partPath);
 						dfs.recoverLease(partPath);
-						boolean isclosed= dfs.isFileClosed(partPath);
+						boolean isclosed = dfs.isFileClosed(partPath);
 						StopWatch sw = new StopWatch();
 						sw.start();
-						while(!isclosed) {
-							if(sw.getTime() > asyncTimeout) {
+						while (!isclosed) {
+							if (sw.getTime() > asyncTimeout) {
 								break;
 							}
 							try {
@@ -691,8 +690,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 						StopWatch sw = new StopWatch();
 						sw.start();
 						long newLen = fs.getFileStatus(partPath).getLen();
-						while(newLen != bucketState.currentFileValidLength) {
-							if(sw.getTime() > asyncTimeout) {
+						while (newLen != bucketState.currentFileValidLength) {
+							if (sw.getTime() > asyncTimeout) {
 								break;
 							}
 							try {
@@ -749,7 +748,7 @@ public class RollingSink<T> extends RichSinkFunction<T>
 					}
 				} catch (IOException e) {
 					LOG.error("(RESTORE) Error while renaming pending file {} to final path {}: {}", pendingPath, finalPath, e);
-					throw new RuntimeException("Error while renaming pending file " + pendingPath+ " to final path " + finalPath, e);
+					throw new RuntimeException("Error while renaming pending file " + pendingPath + " to final path " + finalPath, e);
 				}
 			}
 		}
@@ -785,8 +784,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Sets the maximum bucket size in bytes.
 	 *
-	 * <p>
-	 * When a bucket part file becomes larger than this size a new bucket part file is started and
+	 *
+	 * <p>When a bucket part file becomes larger than this size a new bucket part file is started and
 	 * the old one is closed. The name of the bucket files depends on the {@link Bucketer}.
 	 *
 	 * @param batchSize The bucket part file size in bytes.
@@ -875,8 +874,8 @@ public class RollingSink<T> extends RichSinkFunction<T>
 	/**
 	 * Disable cleanup of leftover in-progress/pending files when the sink is opened.
 	 *
-	 * <p>
-	 * This should only be disabled if using the sink without checkpoints, to not remove
+	 *
+	 * <p>This should only be disabled if using the sink without checkpoints, to not remove
 	 * the files already in the directory.
 	 *
 	 * @deprecated This option is deprecated and remains only for backwards compatibility.

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SequenceFileWriter.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SequenceFileWriter.java
index 32cadec..901589f 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SequenceFileWriter.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SequenceFileWriter.java
@@ -15,8 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.fs;
 
+package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -25,6 +25,7 @@ import org.apache.flink.api.java.typeutils.InputTypeConfigurable;
 import org.apache.flink.api.java.typeutils.TupleTypeInfoBase;
 import org.apache.flink.runtime.fs.hdfs.HadoopFileSystem;
 import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -88,7 +89,7 @@ public class SequenceFileWriter<K extends Writable, V extends Writable> extends
 		}
 
 		CompressionCodec codec = null;
-		
+
 		Configuration conf = HadoopFileSystem.getHadoopConfiguration();
 
 		if (!compressionCodecName.equals("None")) {

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StreamWriterBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StreamWriterBase.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StreamWriterBase.java
index a04e4b5..3e9eb11 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StreamWriterBase.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StreamWriterBase.java
@@ -15,12 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,7 +37,7 @@ import java.util.EnumSet;
  */
 public abstract class StreamWriterBase<T> implements Writer<T> {
 
-	private static Logger LOG = LoggerFactory.getLogger(BucketingSink.class);
+	private static final Logger LOG = LoggerFactory.getLogger(BucketingSink.class);
 
 	/**
 	 * The {@code FSDataOutputStream} for the current part file.
@@ -61,11 +63,11 @@ public abstract class StreamWriterBase<T> implements Writer<T> {
 	/**
 	 * If hflush is available in this version of HDFS, then this method calls
 	 * hflush, else it calls sync.
+	 *
+	 * <p>Note: This code comes from Flume
+	 *
 	 * @param os - The stream to flush/sync
 	 * @throws java.io.IOException
-	 *
-	 * <p>
-	 * Note: This code comes from Flume
 	 */
 	protected void hflushOrSync(FSDataOutputStream os) throws IOException {
 		try {
@@ -80,8 +82,8 @@ public abstract class StreamWriterBase<T> implements Writer<T> {
 			String msg = "Error while trying to hflushOrSync!";
 			LOG.error(msg + " " + e.getCause());
 			Throwable cause = e.getCause();
-			if(cause != null && cause instanceof IOException) {
-				throw (IOException)cause;
+			if (cause != null && cause instanceof IOException) {
+				throw (IOException) cause;
 			}
 			throw new RuntimeException(msg, e);
 		} catch (Exception e) {
@@ -94,12 +96,11 @@ public abstract class StreamWriterBase<T> implements Writer<T> {
 	/**
 	 * Gets the hflush call using reflection. Fallback to sync if hflush is not available.
 	 *
-	 * <p>
-	 * Note: This code comes from Flume
+	 * <p>Note: This code comes from Flume
 	 */
 	private Method reflectHflushOrSync(FSDataOutputStream os) {
 		Method m = null;
-		if(os != null) {
+		if (os != null) {
 			Class<?> fsDataOutputStreamClass = os.getClass();
 			try {
 				m = fsDataOutputStreamClass.getMethod("hflush");
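
The hflushOrSync()/reflectHflushOrSync() pair above boils down to "call hflush reflectively if the stream offers it, otherwise fall back to sync". A condensed sketch of that fallback, using only the method names visible in the diff, could look like the following; HflushOrSync is a hypothetical helper, not part of the connector.

import org.apache.hadoop.fs.FSDataOutputStream;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/** Sketch of the reflective hflush-or-sync fallback. */
public final class HflushOrSync {

	private HflushOrSync() {
	}

	public static void flush(FSDataOutputStream os) throws IOException {
		Method m;
		try {
			// hflush is available on newer Hadoop versions
			m = os.getClass().getMethod("hflush");
		} catch (NoSuchMethodException e) {
			try {
				// fall back to the older sync() call
				m = os.getClass().getMethod("sync");
			} catch (NoSuchMethodException e2) {
				throw new IOException("Neither hflush nor sync found on " + os.getClass(), e2);
			}
		}
		try {
			m.invoke(os);
		} catch (IllegalAccessException | InvocationTargetException e) {
			throw new IOException("Error while calling " + m.getName(), e);
		}
	}
}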

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StringWriter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StringWriter.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StringWriter.java
index 6568a86..d2ef9d6 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StringWriter.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/StringWriter.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.hadoop.fs.FSDataOutputStream;

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SystemClock.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SystemClock.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SystemClock.java
index 41663df..eedb370 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SystemClock.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/SystemClock.java
@@ -15,8 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.flink.streaming.connectors.fs;
 
+package org.apache.flink.streaming.connectors.fs;
 
 /**
  * A {@link Clock} that uses {@code System.currentTimeMillis()} to determine the system time.

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Writer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Writer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Writer.java
index c3b4cb6..ab896c8 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Writer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/Writer.java
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
 import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -58,7 +60,7 @@ public interface Writer<T> extends Serializable {
 	 * taken. The call should close all state related to the current output file,
 	 * including the output stream opened in {@code open}.
 	 */
-	void close() throws IOException ;
+	void close() throws IOException;
 
 	/**
 	 * Writes one element to the bucket file.

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BasePathBucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BasePathBucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BasePathBucketer.java
index 0bf14b3..2f325f6 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BasePathBucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BasePathBucketer.java
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
 import org.apache.flink.streaming.connectors.fs.Clock;
+
 import org.apache.hadoop.fs.Path;
 
 /**

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/Bucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/Bucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/Bucketer.java
index 86aa9f3..f2eebf3 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/Bucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/Bucketer.java
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
 import org.apache.flink.streaming.connectors.fs.Clock;
+
 import org.apache.hadoop.fs.Path;
 
 import java.io.Serializable;
@@ -26,8 +28,8 @@ import java.io.Serializable;
  * A bucketer is used with a {@link BucketingSink}
  * to put emitted elements into rolling files.
  *
- * <p>
- * The {@code BucketingSink} can be writing to many buckets at a time, and it is responsible for managing
+ *
+ * <p>The {@code BucketingSink} can be writing to many buckets at a time, and it is responsible for managing
  * a set of active buckets. Whenever a new element arrives it will ask the {@code Bucketer} for the bucket
  * path the element should fall in. The {@code Bucketer} can, for example, determine buckets based on
  * system time.
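
As the javadoc above describes, the Bucketer decides, per element, which bucket directory the element falls into, so buckets do not have to be time based. The sketch below routes elements by a key embedded in the element; it assumes the interface exposes a single getBucketPath(Clock, Path, T) method (that method is not shown in this excerpt), and the UserIdBucketer name and the "userId,payload" element format are made up for illustration.

import org.apache.flink.streaming.connectors.fs.Clock;
import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;

import org.apache.hadoop.fs.Path;

/** Sketch of a key-based Bucketer that ignores time entirely. */
public class UserIdBucketer implements Bucketer<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public Path getBucketPath(Clock clock, Path basePath, String element) {
		// derive the bucket from the element itself rather than from the clock
		String userId = element.split(",", 2)[0];
		return new Path(basePath, "user-" + userId);
	}
}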

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSink.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSink.java
index 58dd4dc..70168b5 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSink.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSink.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import org.apache.commons.lang3.time.StopWatch;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.state.ListState;
@@ -41,6 +41,8 @@ import org.apache.flink.streaming.connectors.fs.Writer;
 import org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback;
 import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
 import org.apache.flink.util.Preconditions;
+
+import org.apache.commons.lang3.time.StopWatch;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -54,23 +56,23 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.Iterator;
 
 /**
  * Sink that emits its input elements to {@link FileSystem} files within
  * buckets. This is integrated with the checkpointing mechanism to provide exactly once semantics.
  *
- * <p>
- * When creating the sink a {@code basePath} must be specified. The base directory contains
+ *
+ * <p>When creating the sink a {@code basePath} must be specified. The base directory contains
  * one directory for every bucket. The bucket directories themselves contain several part files,
  * one for each parallel subtask of the sink. These part files contain the actual output data.
  *
- * <p>
- * The sink uses a {@link Bucketer} to determine in which bucket directory each element should
+ *
+ * <p>The sink uses a {@link Bucketer} to determine in which bucket directory each element should
  * be written to inside the base directory. The {@code Bucketer} can, for example, use time or
  * a property of the element to determine the bucket directory. The default {@code Bucketer} is a
  * {@link DateTimeBucketer} which will create one new bucket every hour. You can specify
@@ -78,8 +80,8 @@ import java.util.Iterator;
  * {@link BasePathBucketer} if you don't want to have buckets but still want to write part-files
  * in a fault-tolerant way.
  *
- * <p>
- * The filenames of the part files contain the part prefix, the parallel subtask index of the sink
+ *
+ * <p>The filenames of the part files contain the part prefix, the parallel subtask index of the sink
  * and a rolling counter. For example the file {@code "part-1-17"} contains the data from
  * {@code subtask 1} of the sink and is the {@code 17th} bucket created by that subtask. Per default
  * the part prefix is {@code "part"} but this can be configured using {@link #setPartPrefix(String)}.
@@ -87,8 +89,8 @@ import java.util.Iterator;
  * the part counter is increased and a new part file is created. The batch size defaults to {@code 384MB},
  * this can be configured using {@link #setBatchSize(long)}.
  *
- * <p>
- * In some scenarios, the open buckets are required to change based on time. In these cases, the sink
+ *
+ * <p>In some scenarios, the open buckets are required to change based on time. In these cases, the sink
  * needs to determine when a bucket has become inactive, in order to flush and close the part file.
  * To support this there are two configurable settings:
  * <ol>
@@ -97,17 +99,17 @@ import java.util.Iterator;
  *     <li>the minimum amount of time a bucket has to not receive any data before it is considered inactive,
  *     configured by {@link #setInactiveBucketThreshold(long)}</li>
  * </ol>
- * Both of these parameters default to {@code 60,000 ms}, or {@code 1 min}.
+ * Both of these parameters default to {@code 60,000 ms}, or {@code 1 min}.
  *
- * <p>
- * Part files can be in one of three states: {@code in-progress}, {@code pending} or {@code finished}.
+ *
+ * <p>Part files can be in one of three states: {@code in-progress}, {@code pending} or {@code finished}.
  * The reason for this is how the sink works together with the checkpointing mechanism to provide exactly-once
  * semantics and fault-tolerance. The part file that is currently being written to is {@code in-progress}. Once
  * a part file is closed for writing it becomes {@code pending}. When a checkpoint is successful the currently
  * pending files will be moved to {@code finished}.
  *
- * <p>
- * If case of a failure, and in order to guarantee exactly-once semantics, the sink should roll back to the state it
+ *
+ * <p>In case of a failure, and in order to guarantee exactly-once semantics, the sink should roll back to the state it
  * had when that last successful checkpoint occurred. To this end, when restoring, the restored files in {@code pending}
  * state are transferred into the {@code finished} state while any {@code in-progress} files are rolled back, so that
  * they do not contain data that arrived after the checkpoint from which we restore. If the {@code FileSystem} supports
@@ -117,8 +119,8 @@ import java.util.Iterator;
  * to that point. The prefixes and suffixes for the different file states and valid-length files can be configured
  * using the adequate setter method, e.g. {@link #setPendingSuffix(String)}.
  *
- * <p>
- * <b>NOTE:</b>
+ *
+ * <p><b>NOTE:</b>
  * <ol>
  *     <li>
  *         If checkpointing is not enabled the pending files will never be moved to the finished state. In that case,
@@ -134,15 +136,15 @@ import java.util.Iterator;
  *     </li>
  * </ol>
  *
- * <p>
- * Example:
+ *
+ * <p>Example:
  * <pre>{@code
  *     new BucketingSink<Tuple2<IntWritable, Text>>(outPath)
  *         .setWriter(new SequenceFileWriter<IntWritable, Text>())
  *         .setBucketer(new DateTimeBucketer("yyyy-MM-dd--HHmm")
  * }</pre>
  *
- * This will create a sink that writes to {@code SequenceFiles} and rolls every minute.
+ * <p>This will create a sink that writes to {@code SequenceFiles} and rolls every minute.
  *
  * @see DateTimeBucketer
  * @see StringWriter
@@ -157,7 +159,7 @@ public class BucketingSink<T>
 
 	private static final long serialVersionUID = 1L;
 
-	private static Logger LOG = LoggerFactory.getLogger(BucketingSink.class);
+	private static final Logger LOG = LoggerFactory.getLogger(BucketingSink.class);
 
 	// --------------------------------------------------------------------------------------------
 	//  User configuration values
@@ -167,69 +169,68 @@ public class BucketingSink<T>
 	/**
 	 * The default maximum size of part files (currently {@code 384 MB}).
 	 */
-	private final long DEFAULT_BATCH_SIZE = 1024L * 1024L * 384L;
+	private static final long DEFAULT_BATCH_SIZE = 1024L * 1024L * 384L;
 
 	/**
 	 * The default time between checks for inactive buckets. By default, {60 sec}.
 	 */
-	private final long DEFAULT_INACTIVE_BUCKET_CHECK_INTERVAL_MS = 60 * 1000L;
+	private static final long DEFAULT_INACTIVE_BUCKET_CHECK_INTERVAL_MS = 60 * 1000L;
 
 	/**
 	 * The default threshold (in {@code ms}) for marking a bucket as inactive and
 	 * closing its part files. By default, {60 sec}.
 	 */
-	private final long DEFAULT_INACTIVE_BUCKET_THRESHOLD_MS = 60 * 1000L;
+	private static final long DEFAULT_INACTIVE_BUCKET_THRESHOLD_MS = 60 * 1000L;
 
 	/**
 	 * The suffix for {@code in-progress} part files. These are files we are
 	 * currently writing to, but which were not yet confirmed by a checkpoint.
 	 */
-	private final String DEFAULT_IN_PROGRESS_SUFFIX = ".in-progress";
+	private static final String DEFAULT_IN_PROGRESS_SUFFIX = ".in-progress";
 
 	/**
 	 * The prefix for {@code in-progress} part files. These are files we are
 	 * currently writing to, but which were not yet confirmed by a checkpoint.
 	 */
-	private final String DEFAULT_IN_PROGRESS_PREFIX = "_";
+	private static final String DEFAULT_IN_PROGRESS_PREFIX = "_";
 
 	/**
 	 * The suffix for {@code pending} part files. These are closed files that we are
 	 * not currently writing to (inactive or reached {@link #batchSize}), but which
 	 * were not yet confirmed by a checkpoint.
 	 */
-	private final String DEFAULT_PENDING_SUFFIX = ".pending";
+	private static final String DEFAULT_PENDING_SUFFIX = ".pending";
 
 	/**
 	 * The prefix for {@code pending} part files. These are closed files that we are
 	 * not currently writing to (inactive or reached {@link #batchSize}), but which
 	 * were not yet confirmed by a checkpoint.
 	 */
-	private final String DEFAULT_PENDING_PREFIX = "_";
+	private static final String DEFAULT_PENDING_PREFIX = "_";
 
 	/**
 	 * When {@code truncate()} is not supported by the used {@link FileSystem}, we create
 	 * a file along the part file with this suffix that contains the length up to which
 	 * the part file is valid.
 	 */
-	private final String DEFAULT_VALID_SUFFIX = ".valid-length";
+	private static final String DEFAULT_VALID_SUFFIX = ".valid-length";
 
 	/**
 	 * When {@code truncate()} is not supported by the used {@link FileSystem}, we create
 	 * a file along the part file with this preffix that contains the length up to which
 	 * the part file is valid.
 	 */
-	private final String DEFAULT_VALID_PREFIX = "_";
+	private static final String DEFAULT_VALID_PREFIX = "_";
 
 	/**
 	 * The default prefix for part files.
 	 */
-	private final String DEFAULT_PART_REFIX = "part";
+	private static final String DEFAULT_PART_REFIX = "part";
 
 	/**
 	 * The default timeout for asynchronous operations such as recoverLease and truncate (in {@code ms}).
 	 */
-	private final long DEFAULT_ASYNC_TIMEOUT_MS = 60 * 1000;
-
+	private static final long DEFAULT_ASYNC_TIMEOUT_MS = 60 * 1000;
 
 	/**
 	 * The base {@code Path} that stores all bucket directories.
@@ -259,7 +260,7 @@ public class BucketingSink<T>
 	private String pendingPrefix = DEFAULT_PENDING_PREFIX;
 
 	private String validLengthSuffix = DEFAULT_VALID_SUFFIX;
-	private String validLengthPrefix= DEFAULT_VALID_PREFIX;
+	private String validLengthPrefix = DEFAULT_VALID_PREFIX;
 
 	private String partPrefix = DEFAULT_PART_REFIX;
 
@@ -273,7 +274,7 @@ public class BucketingSink<T>
 	// -------------------------------------------§-------------------------------------------------
 
 	/**
-	 * We use reflection to get the .truncate() method, this is only available starting with Hadoop 2.7
+	 * We use reflection to get the .truncate() method; it is only available starting with Hadoop 2.7.
 	 */
 	private transient Method refTruncate;
 
@@ -286,7 +287,7 @@ public class BucketingSink<T>
 	private transient ListState<State<T>> restoredBucketStates;
 
 	/**
-	 * User-defined FileSystem parameters
+	 * User-defined FileSystem parameters.
 	 */
 	private Configuration fsConfig;
 
@@ -302,8 +303,8 @@ public class BucketingSink<T>
 	/**
 	 * Creates a new {@code BucketingSink} that writes files to the given base directory.
 	 *
-	 * <p>
-	 * This uses a{@link DateTimeBucketer} as {@link Bucketer} and a {@link StringWriter} has writer.
+	 *
+	 * <p>This uses a {@link DateTimeBucketer} as {@link Bucketer} and a {@link StringWriter} as writer.
 	 * The maximum bucket size is set to 384 MB.
 	 *
 	 * @param basePath The directory to which to write the bucket files.
@@ -330,7 +331,7 @@ public class BucketingSink<T>
 	 */
 	public BucketingSink<T> setFSConfig(org.apache.hadoop.conf.Configuration config) {
 		this.fsConfig = new Configuration();
-		for(Map.Entry<String, String> entry : config) {
+		for (Map.Entry<String, String> entry : config) {
 			fsConfig.setString(entry.getKey(), entry.getValue());
 		}
 		return this;
@@ -572,12 +573,12 @@ public class BucketingSink<T>
 
 	/**
 	 * Gets the truncate() call using reflection.
-	 * <p>
-	 * <b>NOTE:</b> This code comes from Flume.
+	 *
+	 * <p><b>NOTE:</b> This code comes from Flume.
 	 */
 	private Method reflectTruncate(FileSystem fs) {
 		Method m = null;
-		if(fs != null) {
+		if (fs != null) {
 			Class<?> fsClass = fs.getClass();
 			try {
 				m = fsClass.getMethod("truncate", Path.class, long.class);
@@ -897,8 +898,8 @@ public class BucketingSink<T>
 	/**
 	 * Sets the maximum bucket size in bytes.
 	 *
-	 * <p>
-	 * When a bucket part file becomes larger than this size a new bucket part file is started and
+	 *
+	 * <p>When a bucket part file becomes larger than this size a new bucket part file is started and
 	 * the old one is closed. The name of the bucket files depends on the {@link Bucketer}.
 	 *
 	 * @param batchSize The bucket part file size in bytes.
@@ -1008,8 +1009,8 @@ public class BucketingSink<T>
 	/**
 	 * Disable cleanup of leftover in-progress/pending files when the sink is opened.
 	 *
-	 * <p>
-	 * This should only be disabled if using the sink without checkpoints, to not remove
+	 *
+	 * <p>This should only be disabled if using the sink without checkpoints, to not remove
 	 * the files already in the directory.
 	 *
 	 * @deprecated This option is deprecated and remains only for backwards compatibility.
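
Pulling together the setters named in the javadoc above, a typical BucketingSink configuration might look like the sketch below. The output path and the BucketingSinkExample class are made up, and setInactiveBucketCheckInterval is assumed to be the companion setter of setInactiveBucketThreshold that the inactive-bucket discussion refers to.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.fs.StringWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

public class BucketingSinkExample {

	public static void attachSink(DataStream<String> lines) {
		BucketingSink<String> sink = new BucketingSink<>("/tmp/bucketing-out");
		sink.setBucketer(new DateTimeBucketer<String>("yyyy-MM-dd--HHmm")); // one bucket per minute
		sink.setWriter(new StringWriter<String>());                         // writes toString() plus a newline per element
		sink.setBatchSize(1024L * 1024L * 384L);                            // roll part files at 384 MB
		sink.setInactiveBucketCheckInterval(60 * 1000);                     // check for inactive buckets every minute
		sink.setInactiveBucketThreshold(60 * 1000);                         // close buckets that were idle for a minute
		sink.setPendingSuffix(".pending");
		lines.addSink(sink);
	}
}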

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
index b985e14..b7035fe 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
 import org.apache.flink.streaming.connectors.fs.Clock;
+
 import org.apache.hadoop.fs.Path;
 
 import java.io.IOException;
@@ -28,27 +30,27 @@ import java.util.Date;
 /**
  * A {@link Bucketer} that assigns to buckets based on current system time.
  *
- * <p>
- * The {@code DateTimeBucketer} will create directories of the following form:
+ *
+ * <p>The {@code DateTimeBucketer} will create directories of the following form:
  * {@code /{basePath}/{dateTimePath}/}. The {@code basePath} is the path
  * that was specified as a base path when creating the
  * {@link BucketingSink}. The {@code dateTimePath}
  * is determined based on the current system time and the user provided format string.
  *
- * <p>
- * {@link SimpleDateFormat} is used to derive a date string from the current system time and
+ *
+ * <p>{@link SimpleDateFormat} is used to derive a date string from the current system time and
  * the date format string. The default format string is {@code "yyyy-MM-dd--HH"} so the rolling
  * files will have a granularity of hours.
  *
  *
- * <p>
- * Example:
+ *
+ * <p>Example:
  *
  * <pre>{@code
  *     Bucketer buck = new DateTimeBucketer("yyyy-MM-dd--HH");
  * }</pre>
  *
- * This will create for example the following bucket path:
+ * <p>This will create for example the following bucket path:
  * {@code /base/1976-12-31-14/}
  *
  */
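 
The bucket path described above is just the base path plus a SimpleDateFormat rendering of the current time. A self-contained illustration of that derivation; the printed value depends on the wall clock and the base path "/base" is taken from the Javadoc example.

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateTimePathSketch {
	public static void main(String[] args) {
		SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd--HH"); // the default format string
		String dateTimePath = fmt.format(new Date());                  // e.g. "2017-05-28--06"
		System.out.println("/base/" + dateTimePath);                   // directory the bucketer would pick under /base
	}
}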

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
index 2d8492f..b096db4 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs;
 
-import com.google.common.collect.Sets;
 import org.apache.flink.api.common.functions.RichMapFunction;
 import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
 import org.apache.flink.streaming.api.datastream.DataStream;
@@ -25,6 +25,8 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
 import org.apache.flink.test.checkpointing.StreamFaultToleranceTestBase;
 import org.apache.flink.util.NetUtils;
+
+import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.LocatedFileStatus;
@@ -55,8 +57,8 @@ import static org.junit.Assert.assertTrue;
 /**
  * Tests for {@link org.apache.flink.streaming.connectors.fs.RollingSink}.
  *
- * <p>
- * This test only verifies the exactly once behaviour of the sink. Another test tests the
+ *
+ * <p>This test only verifies the exactly once behaviour of the sink. Another test tests the
  * rolling behaviour.
  *
  * @deprecated should be removed with the {@link RollingSink}.
@@ -64,7 +66,7 @@ import static org.junit.Assert.assertTrue;
 @Deprecated
 public class RollingSinkFaultToleranceITCase extends StreamFaultToleranceTestBase {
 
-	final long NUM_STRINGS = 16_000;
+	private static final long NUM_STRINGS = 16_000;
 
 	@ClassRule
 	public static TemporaryFolder tempFolder = new TemporaryFolder();
@@ -105,10 +107,8 @@ public class RollingSinkFaultToleranceITCase extends StreamFaultToleranceTestBas
 	public void testProgram(StreamExecutionEnvironment env) {
 		assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);
 
-		int PARALLELISM = 12;
-
 		env.enableCheckpointing(20);
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(12);
 		env.disableOperatorChaining();
 
 		DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();
@@ -211,7 +211,6 @@ public class RollingSinkFaultToleranceITCase extends StreamFaultToleranceTestBas
 		private long failurePos;
 		private long count;
 
-
 		OnceFailingIdentityMapper(long numElements) {
 			this.numElements = numElements;
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkITCase.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkITCase.java
index 72f2f21..10d1846 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkITCase.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkITCase.java
@@ -18,15 +18,6 @@
 
 package org.apache.flink.streaming.connectors.fs;
 
-import org.apache.avro.Schema;
-import org.apache.avro.Schema.Type;
-import org.apache.avro.file.DataFileConstants;
-import org.apache.avro.file.DataFileStream;
-import org.apache.avro.generic.GenericData;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.generic.GenericData.StringType;
-import org.apache.avro.specific.SpecificDatumReader;
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.common.functions.RichFilterFunction;
 import org.apache.flink.api.common.functions.RichFlatMapFunction;
@@ -45,9 +36,18 @@ import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase;
 import org.apache.flink.util.Collector;
 import org.apache.flink.util.NetUtils;
 
+import org.apache.avro.Schema;
+import org.apache.avro.Schema.Type;
+import org.apache.avro.file.DataFileConstants;
+import org.apache.avro.file.DataFileStream;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericData.StringType;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.specific.SpecificDatumReader;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
-
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -77,8 +76,8 @@ import java.util.Map;
  * tests test the different output methods as well as the rolling feature using a manual clock
  * that increases time in lockstep with element computation using latches.
  *
- * <p>
- * This only tests the rolling behaviour of the sink. There is a separate ITCase that verifies
+ *
+ * <p>This only tests the rolling behaviour of the sink. There is a separate ITCase that verifies
  * exactly once behaviour.
  *
  * @deprecated should be removed with the {@link RollingSink}.
@@ -128,13 +127,12 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 	 */
 	@Test
 	public void testNonRollingStringWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/string-non-rolling-out";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 				.broadcast()
 				.filter(new OddEvenFilter());
 
@@ -145,7 +143,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 				.setPendingSuffix("");
 
 		source
-				.map(new MapFunction<Tuple2<Integer,String>, String>() {
+				.map(new MapFunction<Tuple2<Integer, String>, String>() {
 					private static final long serialVersionUID = 1L;
 					@Override
 					public String map(Tuple2<Integer, String> value) throws Exception {
@@ -160,7 +158,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 
 		BufferedReader br = new BufferedReader(new InputStreamReader(inStream));
 
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			String line = br.readLine();
 			Assert.assertEquals("message #" + i, line);
 		}
@@ -171,7 +169,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 
 		br = new BufferedReader(new InputStreamReader(inStream));
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			String line = br.readLine();
 			Assert.assertEquals("message #" + i, line);
 		}
@@ -185,17 +183,16 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 	 */
 	@Test
 	public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 				.broadcast()
 				.filter(new OddEvenFilter());
 
-		DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer,String>, Tuple2<IntWritable, Text>>() {
+		DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
 			private static final long serialVersionUID = 1L;
 
 			@Override
@@ -204,7 +201,6 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 			}
 		});
 
-
 		RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
 				.setWriter(new SequenceFileWriter<IntWritable, Text>())
 				.setBucketer(new NonRollingBucketer())
@@ -227,7 +223,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		IntWritable intWritable = new IntWritable();
 		Text txt = new Text();
 
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			reader.next(intWritable, txt);
 			Assert.assertEquals(i, intWritable.get());
 			Assert.assertEquals("message #" + i, txt.toString());
@@ -244,7 +240,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 				100000,
 				new Configuration());
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			reader.next(intWritable, txt);
 			Assert.assertEquals(i, intWritable.get());
 			Assert.assertEquals("message #" + i, txt.toString());
@@ -260,17 +256,16 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 	 */
 	@Test
 	public void testNonRollingSequenceFileWithCompressionWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/seq-non-rolling-out";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 				.broadcast()
 				.filter(new OddEvenFilter());
 
-		DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer,String>, Tuple2<IntWritable, Text>>() {
+		DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
 			private static final long serialVersionUID = 1L;
 
 			@Override
@@ -279,7 +274,6 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 			}
 		});
 
-
 		RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
 				.setWriter(new SequenceFileWriter<IntWritable, Text>("Default", SequenceFile.CompressionType.BLOCK))
 				.setBucketer(new NonRollingBucketer())
@@ -302,7 +296,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		IntWritable intWritable = new IntWritable();
 		Text txt = new Text();
 
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			reader.next(intWritable, txt);
 			Assert.assertEquals(i, intWritable.get());
 			Assert.assertEquals("message #" + i, txt.toString());
@@ -319,7 +313,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 				100000,
 				new Configuration());
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			reader.next(intWritable, txt);
 			Assert.assertEquals(i, intWritable.get());
 			Assert.assertEquals("message #" + i, txt.toString());
@@ -328,25 +322,22 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		reader.close();
 		inStream.close();
 	}
-	
-	
+
 	/**
 	 * This tests {@link AvroKeyValueSinkWriter}
 	 * with non-rolling output and without compression.
 	 */
 	@Test
 	public void testNonRollingAvroKeyValueWithoutCompressionWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/avro-kv-no-comp-non-rolling-out";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 				.broadcast()
 				.filter(new OddEvenFilter());
 
-
 		Map<String, String> properties = new HashMap<>();
 		Schema keySchema = Schema.create(Type.INT);
 		Schema valueSchema = Schema.create(Type.STRING);
@@ -369,7 +360,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));
 		SpecificDatumReader<GenericRecord> elementReader = new SpecificDatumReader<GenericRecord>(elementSchema);
 		DataFileStream<GenericRecord> dataFileStream = new DataFileStream<GenericRecord>(inStream, elementReader);
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			AvroKeyValue<Integer, String> wrappedEntry = new AvroKeyValue<Integer, String>(dataFileStream.next());
 			int key = wrappedEntry.getKey().intValue();
 			Assert.assertEquals(i, key);
@@ -383,7 +374,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		inStream = dfs.open(new Path(outPath + "/part-1-0"));
 		dataFileStream = new DataFileStream<GenericRecord>(inStream, elementReader);
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			AvroKeyValue<Integer, String> wrappedEntry = new AvroKeyValue<Integer, String>(dataFileStream.next());
 			int key = wrappedEntry.getKey().intValue();
 			Assert.assertEquals(i, key);
@@ -394,24 +385,22 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		dataFileStream.close();
 		inStream.close();
 	}
-	
+
 	/**
 	 * This tests {@link AvroKeyValueSinkWriter}
 	 * with non-rolling output and with compression.
 	 */
 	@Test
 	public void testNonRollingAvroKeyValueWithCompressionWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/avro-kv-no-comp-non-rolling-out";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 				.broadcast()
 				.filter(new OddEvenFilter());
 
-
 		Map<String, String> properties = new HashMap<>();
 		Schema keySchema = Schema.create(Type.INT);
 		Schema valueSchema = Schema.create(Type.STRING);
@@ -436,7 +425,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));
 		SpecificDatumReader<GenericRecord> elementReader = new SpecificDatumReader<GenericRecord>(elementSchema);
 		DataFileStream<GenericRecord> dataFileStream = new DataFileStream<GenericRecord>(inStream, elementReader);
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			AvroKeyValue<Integer, String> wrappedEntry = new AvroKeyValue<Integer, String>(dataFileStream.next());
 			int key = wrappedEntry.getKey().intValue();
 			Assert.assertEquals(i, key);
@@ -450,7 +439,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		inStream = dfs.open(new Path(outPath + "/part-1-0"));
 		dataFileStream = new DataFileStream<GenericRecord>(inStream, elementReader);
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			AvroKeyValue<Integer, String> wrappedEntry = new AvroKeyValue<Integer, String>(dataFileStream.next());
 			int key = wrappedEntry.getKey().intValue();
 			Assert.assertEquals(i, key);
@@ -462,20 +451,18 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		inStream.close();
 	}
 
-
 	/**
-	 * This tests user defined hdfs configuration
+	 * This tests user-defined HDFS configuration.
 	 * @throws Exception
      */
 	@Test
 	public void testUserDefinedConfiguration() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/string-non-rolling-with-config";
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(2);
 
-		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(NUM_ELEMENTS))
+		DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
 			.broadcast()
 			.filter(new OddEvenFilter());
 
@@ -490,7 +477,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 			.setPendingSuffix("");
 
 		source
-			.map(new MapFunction<Tuple2<Integer,String>, String>() {
+			.map(new MapFunction<Tuple2<Integer, String>, String>() {
 				private static final long serialVersionUID = 1L;
 				@Override
 				public String map(Tuple2<Integer, String> value) throws Exception {
@@ -505,7 +492,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 
 		BufferedReader br = new BufferedReader(new InputStreamReader(inStream));
 
-		for (int i = 0; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 0; i < numElements; i += 2) {
 			String line = br.readLine();
 			Assert.assertEquals("message #" + i, line);
 		}
@@ -516,7 +503,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 
 		br = new BufferedReader(new InputStreamReader(inStream));
 
-		for (int i = 1; i < NUM_ELEMENTS; i += 2) {
+		for (int i = 1; i < numElements; i += 2) {
 			String line = br.readLine();
 			Assert.assertEquals("message #" + i, line);
 		}
@@ -525,8 +512,8 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 	}
 
 	// we use this to synchronize the clock changes to elements being processed
-	final static MultiShotLatch latch1 = new MultiShotLatch();
-	final static MultiShotLatch latch2 = new MultiShotLatch();
+	private static final MultiShotLatch latch1 = new MultiShotLatch();
+	private static final MultiShotLatch latch2 = new MultiShotLatch();
 
 	/**
 	 * This uses {@link org.apache.flink.streaming.connectors.fs.DateTimeBucketer} to
@@ -536,19 +523,16 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 	 */
 	@Test
 	public void testDateTimeRollingStringWriter() throws Exception {
-		final int NUM_ELEMENTS = 20;
-		final int PARALLELISM = 2;
+		final int numElements = 20;
 		final String outPath = hdfsURI + "/rolling-out";
 		DateTimeBucketer.setClock(new ModifyableClock());
 		ModifyableClock.setCurrentTime(0);
 
 		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
-		env.setParallelism(PARALLELISM);
-
-
+		env.setParallelism(2);
 
 		DataStream<Tuple2<Integer, String>> source = env.addSource(new WaitingTestSourceFunction(
-				NUM_ELEMENTS))
+				numElements))
 				.broadcast();
 
 		// the parallel flatMap is chained to the sink, so when it has seen 5 elements it can
@@ -664,7 +648,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 
 		testHarness.processElement(new StreamRecord<>("test1", 1L));
 		testHarness.processElement(new StreamRecord<>("test2", 1L));
-		checkFs(outDir, 1, 1 ,0, 0);
+		checkFs(outDir, 1, 1, 0, 0);
 
 		testHarness.processElement(new StreamRecord<>("test3", 1L));
 		checkFs(outDir, 1, 2, 0, 0);
@@ -961,7 +945,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		}
 	}
 
-	public static class OddEvenFilter extends RichFilterFunction<Tuple2<Integer, String>> {
+	private static class OddEvenFilter extends RichFilterFunction<Tuple2<Integer, String>> {
 		private static final long serialVersionUID = 1L;
 
 		@Override
@@ -974,7 +958,7 @@ public class RollingSinkITCase extends StreamingMultipleProgramsTestBase {
 		}
 	}
 
-	public static class ModifyableClock implements Clock {
+	private static class ModifyableClock implements Clock {
 
 		private static volatile long currentTime = 0;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkSecuredITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkSecuredITCase.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkSecuredITCase.java
index 768ca5e..6bd75d4 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkSecuredITCase.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkSecuredITCase.java
@@ -26,9 +26,10 @@ import org.apache.flink.configuration.SecurityOptions;
 import org.apache.flink.runtime.security.SecurityUtils;
 import org.apache.flink.streaming.util.TestStreamEnvironment;
 import org.apache.flink.test.util.SecureTestEnvironment;
-import org.apache.flink.test.util.TestingSecurityContext;
 import org.apache.flink.test.util.TestBaseUtils;
+import org.apache.flink.test.util.TestingSecurityContext;
 import org.apache.flink.util.NetUtils;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.http.HttpConfig;
@@ -47,21 +48,21 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 
 /**
  * Tests for running {@link RollingSinkSecuredITCase} which is an extension of {@link RollingSink} in secure environment
- * Note: only executed for Hadoop version > 3.x.x
+ * Note: only executed for Hadoop version > 3.x.x.
  */
 public class RollingSinkSecuredITCase extends RollingSinkITCase {
 
@@ -141,7 +142,6 @@ public class RollingSinkSecuredITCase extends RollingSinkITCase {
 		map.put("HADOOP_CONF_DIR", hdfsSiteXML.getParentFile().getAbsolutePath());
 		TestBaseUtils.setEnv(map);
 
-
 		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
 		builder.checkDataNodeAddrConfig(true);
 		builder.checkDataNodeHostConfig(true);

http://git-wip-us.apache.org/repos/asf/flink/blob/7292c874/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
index 85f23b6..1ed4a7f 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
-import com.google.common.collect.Sets;
 import org.apache.flink.api.common.functions.RichMapFunction;
 import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
 import org.apache.flink.streaming.api.datastream.DataStream;
@@ -25,6 +25,8 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
 import org.apache.flink.test.checkpointing.StreamFaultToleranceTestBase;
 import org.apache.flink.util.NetUtils;
+
+import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.LocatedFileStatus;
@@ -37,11 +39,11 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 
+import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.io.BufferedReader;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -55,13 +57,13 @@ import static org.junit.Assert.assertTrue;
 /**
  * Tests for {@link BucketingSink}.
  *
- * <p>
- * This test only verifies the exactly once behaviour of the sink. Another test tests the
+ *
+ * <p>This test only verifies the exactly once behaviour of the sink. Another test tests the
  * rolling behaviour.
  */
 public class BucketingSinkFaultToleranceITCase extends StreamFaultToleranceTestBase {
 
-	final long NUM_STRINGS = 16_000;
+	static final long NUM_STRINGS = 16_000;
 
 	@ClassRule
 	public static TemporaryFolder tempFolder = new TemporaryFolder();
@@ -102,10 +104,8 @@ public class BucketingSinkFaultToleranceITCase extends StreamFaultToleranceTestB
 	public void testProgram(StreamExecutionEnvironment env) {
 		assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);
 
-		int PARALLELISM = 12;
-
 		env.enableCheckpointing(20);
-		env.setParallelism(PARALLELISM);
+		env.setParallelism(12);
 		env.disableOperatorChaining();
 
 		DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();
@@ -208,7 +208,6 @@ public class BucketingSinkFaultToleranceITCase extends StreamFaultToleranceTestB
 		private long failurePos;
 		private long count;
 
-
 		OnceFailingIdentityMapper(long numElements) {
 			this.numElements = numElements;
 		}


[13/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kinesis

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index b22ba0c..a194835 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -17,17 +17,18 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import com.amazonaws.services.kinesis.model.HashKeyRange;
-import com.amazonaws.services.kinesis.model.Shard;
-import org.apache.commons.lang.StringUtils;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisBehavioursFactory;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.connectors.kinesis.testutils.TestableKinesisDataFetcher;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.Shard;
+import org.apache.commons.lang3.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -39,6 +40,9 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Tests for the {@link ShardConsumer}.
+ */
 public class ShardConsumerTest {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java
index 63c6c2b..2915e2f 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis.manualtests;
 
 import org.apache.flink.api.common.functions.FlatMapFunction;
@@ -37,12 +38,12 @@ import java.util.Properties;
 /**
  * This is a manual test for the AWS Kinesis connector in Flink.
  *
- * It uses:
+ * <p>It uses:
  *  - A custom KinesisSerializationSchema
  *  - A custom KinesisPartitioner
  *
- * Invocation:
- * --region eu-central-1 --accessKey XXXXXXXXXXXX --secretKey XXXXXXXXXXXXXXXX
+ * <p>Invocation:
+ * --region eu-central-1 --accessKey X --secretKey X
  */
 public class ManualConsumerProducerTest {
 
@@ -69,7 +70,7 @@ public class ManualConsumerProducerTest {
 					// every 10th element goes into a different stream
 					@Override
 					public String getTargetStream(String element) {
-						if(element.split("-")[0].endsWith("0")) {
+						if (element.split("-")[0].endsWith("0")) {
 							return "flink-test-2";
 						}
 						return null; // send to default stream
@@ -90,7 +91,6 @@ public class ManualConsumerProducerTest {
 		});
 		simpleStringStream.addSink(kinesis);
 
-
 		// consuming topology
 		Properties consumerProps = new Properties();
 		consumerProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accessKey"));
@@ -104,13 +104,13 @@ public class ManualConsumerProducerTest {
 				String[] parts = value.split("-");
 				try {
 					long l = Long.parseLong(parts[0]);
-					if(l < 0) {
+					if (l < 0) {
 						throw new RuntimeException("Negative");
 					}
-				} catch(NumberFormatException nfe) {
+				} catch (NumberFormatException nfe) {
 					throw new RuntimeException("First part of '" + value + "' is not a valid numeric type");
 				}
-				if(parts[1].length() != 12) {
+				if (parts[1].length() != 12) {
 					throw new RuntimeException("Second part of '" + value + "' doesn't have 12 characters");
 				}
 			}
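 
The custom KinesisSerializationSchema the Javadoc refers to is the anonymous class partially visible above: it serializes each string and routes roughly every 10th element to the stream "flink-test-2", returning null to fall back to the producer's default stream. Below is a named sketch of the same idea; the serialize/getTargetStream shape and the import path are assumed from this connector version, not spelled out in the diff.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisSerializationSchema;

public class RoutingSerializationSchemaSketch implements KinesisSerializationSchema<String> {
	@Override
	public ByteBuffer serialize(String element) {
		return ByteBuffer.wrap(element.getBytes(StandardCharsets.UTF_8));
	}

	@Override
	public String getTargetStream(String element) {
		// elements look like "<sequence>-<random chars>"; every 10th sequence number ends in 0
		if (element.split("-")[0].endsWith("0")) {
			return "flink-test-2";
		}
		return null; // null means: send to the producer's default stream
	}
}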

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
index 2e452c1..7abcd3c 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
@@ -14,10 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis.manualtests;
 
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
-import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
@@ -26,6 +25,9 @@ import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.testutils.ExactlyOnceValidatingConsumerThread;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisEventsGeneratorProducerThread;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+
+import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,8 +40,8 @@ import java.util.concurrent.atomic.AtomicReference;
  * Then, it starts a consuming topology, ensuring that all records up to a certain
  * point have been seen.
  *
- * Invocation:
- * --region eu-central-1 --accessKey XXXXXXXXXXXX --secretKey XXXXXXXXXXXXXXXX
+ * <p>Invocation:
+ * --region eu-central-1 --accessKey X --secretKey X
  */
 public class ManualExactlyOnceTest {
 
@@ -67,8 +69,8 @@ public class ManualExactlyOnceTest {
 
 		// wait until stream has been created
 		DescribeStreamResult status = client.describeStream(streamName);
-		LOG.info("status {}" ,status);
-		while(!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
+		LOG.info("status {}", status);
+		while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
 			status = client.describeStream(streamName);
 			LOG.info("Status of stream {}", status);
 			Thread.sleep(1000);

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
index 71bcae3..226ac3e 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
@@ -17,13 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kinesis.manualtests;
 
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
-import com.amazonaws.services.kinesis.model.DescribeStreamResult;
-import com.amazonaws.services.kinesis.model.LimitExceededException;
-import com.amazonaws.services.kinesis.model.PutRecordsRequest;
-import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
-import com.amazonaws.services.kinesis.model.PutRecordsResult;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
@@ -32,6 +25,14 @@ import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConsta
 import org.apache.flink.streaming.connectors.kinesis.testutils.ExactlyOnceValidatingConsumerThread;
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+
+import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.LimitExceededException;
+import com.amazonaws.services.kinesis.model.PutRecordsRequest;
+import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
+import com.amazonaws.services.kinesis.model.PutRecordsResult;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,8 +50,8 @@ import java.util.concurrent.atomic.AtomicReference;
  * point have been seen. While the data generator and consuming topology is running,
  * the kinesis stream is resharded two times.
  *
- * Invocation:
- * --region eu-central-1 --accessKey XXXXXXXXXXXX --secretKey XXXXXXXXXXXXXXXX
+ * <p>Invocation:
+ * --region eu-central-1 --accessKey X --secretKey X
  */
 public class ManualExactlyOnceWithStreamReshardingTest {
 
@@ -80,7 +81,7 @@ public class ManualExactlyOnceWithStreamReshardingTest {
 		// wait until stream has been created
 		DescribeStreamResult status = client.describeStream(streamName);
 		LOG.info("status {}", status);
-		while(!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
+		while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
 			status = client.describeStream(streamName);
 			LOG.info("Status of stream {}", status);
 			Thread.sleep(1000);
@@ -113,7 +114,7 @@ public class ManualExactlyOnceWithStreamReshardingTest {
 							Thread.sleep(10);
 
 							Set<PutRecordsRequestEntry> batch = new HashSet<>();
-							for (int i=count; i<count+batchSize; i++) {
+							for (int i = count; i < count + batchSize; i++) {
 								if (i >= TOTAL_EVENT_COUNT) {
 									break;
 								}

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
index 81d0bec..8abf4bb 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kinesis.manualtests;
 
 import org.apache.flink.api.java.utils.ParameterTool;
@@ -32,14 +33,14 @@ import java.util.Properties;
 /**
  * This is a manual test for the AWS Kinesis connector in Flink.
  *
- * It uses:
+ * <p>It uses:
  *  - A custom KinesisSerializationSchema
  *  - A custom KinesisPartitioner
  *
- *  The streams "test-flink" and "flink-test-2" must exist.
+ * <p>The streams "test-flink" and "flink-test-2" must exist.
  *
- * Invocation:
- * --region eu-central-1 --accessKey XXXXXXXXXXXX --secretKey XXXXXXXXXXXXXXXX
+ * <p>Invocation:
+ * --region eu-central-1 --accessKey X --secretKey X
  */
 public class ManualProducerTest {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
index 86202c5..7ca05d7 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
@@ -17,15 +17,14 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-import org.junit.Test;
-
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.AmazonServiceException.ErrorType;
 import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
 import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test for methods in the {@link KinesisProxy} class.

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java
index 157964c..75356ef 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java
@@ -29,6 +29,7 @@ import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConsta
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
 import org.apache.flink.test.util.SuccessException;
 import org.apache.flink.util.Collector;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,7 +41,7 @@ import static org.apache.flink.test.util.TestUtils.tryExecute;
 
 /**
  * A thread that runs a topology with the FlinkKinesisConsumer as source, followed by two flat map
- * functions, one that performs artificial failures and another that validates exactly-once guarantee
+ * functions, one that performs artificial failures and another that validates the exactly-once guarantee.
  */
 public class ExactlyOnceValidatingConsumerThread {
 
@@ -94,7 +95,7 @@ public class ExactlyOnceValidatingConsumerThread {
 		return new Thread(exactlyOnceValidationConsumer);
 	}
 
-	private static class ExactlyOnceValidatingMapper implements FlatMapFunction<String,String>, Checkpointed<BitSet> {
+	private static class ExactlyOnceValidatingMapper implements FlatMapFunction<String, String>, Checkpointed<BitSet> {
 
 		private static final Logger LOG = LoggerFactory.getLogger(ExactlyOnceValidatingMapper.class);
 
@@ -111,15 +112,15 @@ public class ExactlyOnceValidatingConsumerThread {
 			LOG.info("Consumed {}", value);
 
 			int id = Integer.parseInt(value.split("-")[0]);
-			if(validator.get(id)) {
-				throw new RuntimeException("Saw id " + id +" twice!");
+			if (validator.get(id)) {
+				throw new RuntimeException("Saw id " + id + " twice!");
 			}
 			validator.set(id);
-			if(id > totalEventCount-1) {
+			if (id > totalEventCount - 1) {
 				throw new RuntimeException("Out of bounds ID observed");
 			}
 
-			if(validator.nextClearBit(0) == totalEventCount) {
+			if (validator.nextClearBit(0) == totalEventCount) {
 				throw new SuccessException();
 			}
 		}
@@ -135,7 +136,7 @@ public class ExactlyOnceValidatingConsumerThread {
 		}
 	}
 
-	private static class ArtificialFailOnceFlatMapper extends RichFlatMapFunction<String,String> {
+	private static class ArtificialFailOnceFlatMapper extends RichFlatMapFunction<String, String> {
 		int count = 0;
 
 		private final int failAtRecordCount;
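 
The exactly-once check performed by the validating mapper above boils down to BitSet bookkeeping: any id seen twice or out of range fails the run, and the run succeeds once every id below totalEventCount has been seen. A condensed, standalone version of that logic; the real mapper additionally checkpoints the BitSet via the Checkpointed interface.

import java.util.BitSet;

public class ExactlyOnceCheckSketch {
	private final BitSet validator = new BitSet();
	private final int totalEventCount;

	public ExactlyOnceCheckSketch(int totalEventCount) {
		this.totalEventCount = totalEventCount;
	}

	/** Records one id; returns true once all ids in [0, totalEventCount) have been seen exactly once. */
	public boolean record(int id) {
		if (validator.get(id)) {
			throw new RuntimeException("Saw id " + id + " twice!");
		}
		if (id > totalEventCount - 1) {
			throw new RuntimeException("Out of bounds ID observed");
		}
		validator.set(id);
		return validator.nextClearBit(0) == totalEventCount;
	}
}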

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
index ce5a0de..2fda0d5 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
@@ -17,15 +17,16 @@
 
 package org.apache.flink.streaming.connectors.kinesis.testutils;
 
-import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
-import com.amazonaws.services.kinesis.model.GetRecordsResult;
-import com.amazonaws.services.kinesis.model.Record;
-import com.amazonaws.services.kinesis.model.Shard;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 
+import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.Shard;
+
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Date;
@@ -67,7 +68,7 @@ public class FakeKinesisBehavioursFactory {
 
 	}
 
-	public static KinesisProxyInterface nonReshardedStreamsBehaviour(Map<String,Integer> streamsToShardCount) {
+	public static KinesisProxyInterface nonReshardedStreamsBehaviour(Map<String, Integer> streamsToShardCount) {
 		return new NonReshardedStreamsKinesis(streamsToShardCount);
 
 	}
@@ -79,14 +80,14 @@ public class FakeKinesisBehavioursFactory {
 	public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCalls(final int numOfRecords, final int numOfGetRecordsCalls) {
 		return new SingleShardEmittingFixNumOfRecordsKinesis(numOfRecords, numOfGetRecordsCalls);
 	}
-	
+
 	public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(
 		final int numOfRecords, final int numOfGetRecordsCall, final int orderOfCallToExpire) {
 		return new SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis(
 			numOfRecords, numOfGetRecordsCall, orderOfCallToExpire);
 	}
 
-	public static class SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis extends SingleShardEmittingFixNumOfRecordsKinesis {
+	private static class SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis extends SingleShardEmittingFixNumOfRecordsKinesis {
 
 		private boolean expiredOnceAlready = false;
 		private boolean expiredIteratorRefreshed = false;
@@ -103,7 +104,7 @@ public class FakeKinesisBehavioursFactory {
 
 		@Override
 		public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) {
-			if ((Integer.valueOf(shardIterator) == orderOfCallToExpire-1) && !expiredOnceAlready) {
+			if ((Integer.valueOf(shardIterator) == orderOfCallToExpire - 1) && !expiredOnceAlready) {
 				// we fake only once the expired iterator exception at the specified get records attempt order
 				expiredOnceAlready = true;
 				throw new ExpiredIteratorException("Artificial expired shard iterator");
@@ -130,7 +131,7 @@ public class FakeKinesisBehavioursFactory {
 				// fake the iterator refresh when this is called again after getRecords throws expired iterator
 				// exception on the orderOfCallToExpire attempt
 				expiredIteratorRefreshed = true;
-				return String.valueOf(orderOfCallToExpire-1);
+				return String.valueOf(orderOfCallToExpire - 1);
 			}
 		}
 	}
@@ -141,7 +142,7 @@ public class FakeKinesisBehavioursFactory {
 
 		protected final int totalNumOfRecords;
 
-		protected final Map<String,List<Record>> shardItrToRecordBatch;
+		protected final Map<String, List<Record>> shardItrToRecordBatch;
 
 		public SingleShardEmittingFixNumOfRecordsKinesis(final int numOfRecords, final int numOfGetRecordsCalls) {
 			this.totalNumOfRecords = numOfRecords;
@@ -151,9 +152,9 @@ public class FakeKinesisBehavioursFactory {
 			this.shardItrToRecordBatch = new HashMap<>();
 
 			int numOfAlreadyPartitionedRecords = 0;
-			int numOfRecordsPerBatch = numOfRecords/numOfGetRecordsCalls + 1;
-			for (int batch=0; batch<totalNumOfGetRecordsCalls; batch++) {
-				if (batch != totalNumOfGetRecordsCalls-1) {
+			int numOfRecordsPerBatch = numOfRecords / numOfGetRecordsCalls + 1;
+			for (int batch = 0; batch < totalNumOfGetRecordsCalls; batch++) {
+				if (batch != totalNumOfGetRecordsCalls - 1) {
 					shardItrToRecordBatch.put(
 						String.valueOf(batch),
 						createRecordBatchWithRange(
@@ -176,8 +177,8 @@ public class FakeKinesisBehavioursFactory {
 			return new GetRecordsResult()
 				.withRecords(shardItrToRecordBatch.get(shardIterator))
 				.withNextShardIterator(
-					(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls-1)
-						? null : String.valueOf(Integer.valueOf(shardIterator)+1)); // last next shard iterator is null
+					(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
+						? null : String.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null
 		}
 
 		@Override
@@ -211,8 +212,8 @@ public class FakeKinesisBehavioursFactory {
 
 		private Map<String, List<StreamShardHandle>> streamsWithListOfShards = new HashMap<>();
 
-		public NonReshardedStreamsKinesis(Map<String,Integer> streamsToShardCount) {
-			for (Map.Entry<String,Integer> streamToShardCount : streamsToShardCount.entrySet()) {
+		public NonReshardedStreamsKinesis(Map<String, Integer> streamsToShardCount) {
+			for (Map.Entry<String, Integer> streamToShardCount : streamsToShardCount.entrySet()) {
 				String streamName = streamToShardCount.getKey();
 				int shardCount = streamToShardCount.getValue();
 
@@ -220,7 +221,7 @@ public class FakeKinesisBehavioursFactory {
 					// don't do anything
 				} else {
 					List<StreamShardHandle> shardsOfStream = new ArrayList<>(shardCount);
-					for (int i=0; i < shardCount; i++) {
+					for (int i = 0; i < shardCount; i++) {
 						shardsOfStream.add(
 							new StreamShardHandle(
 								streamName,

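The record batching touched above follows simple arithmetic: with numOfRecords records spread over numOfGetRecordsCalls batches, every batch except the last holds numOfRecords / numOfGetRecordsCalls + 1 records and the last batch holds the remainder. A small worked example of that split; the concrete numbers are illustrative only.

public class BatchSplitSketch {
	public static void main(String[] args) {
		int numOfRecords = 10;
		int numOfGetRecordsCalls = 3;
		int perBatch = numOfRecords / numOfGetRecordsCalls + 1; // 4
		int assigned = 0;
		for (int batch = 0; batch < numOfGetRecordsCalls; batch++) {
			int size = (batch != numOfGetRecordsCalls - 1) ? perBatch : numOfRecords - assigned;
			assigned += size;
			System.out.println("batch " + batch + " -> " + size + " records"); // prints 4, 4, 2
		}
	}
}
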
http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java
index fdfdfe1..699c977 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java
@@ -17,7 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kinesis.testutils;
 
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -25,6 +24,8 @@ import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
+
+import org.apache.commons.lang3.RandomStringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,12 +98,12 @@ public class KinesisEventsGeneratorProducerThread {
 		@Override
 		public void run(SourceContext<String> ctx) throws Exception {
 			long seq = 0;
-			while(running) {
+			while (running) {
 				Thread.sleep(10);
 				String evt = (seq++) + "-" + RandomStringUtils.randomAlphabetic(12);
 				ctx.collect(evt);
 				LOG.info("Emitting event {}", evt);
-				if(seq >= limit) {
+				if (seq >= limit) {
 					break;
 				}
 			}

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java
index c8dd347..0377a66 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java
@@ -17,8 +17,12 @@
 
 package org.apache.flink.streaming.connectors.kinesis.testutils;
 
+/**
+ * A generator for Kinesis shard IDs.
+ *
+ * <p>Kinesis shard ids are in the form of: shardId-\d{12}
+ */
 public class KinesisShardIdGenerator {
-	// Kinesis shards ids are in the form of: ^shardId-\d{12}
 	public static String generateFromShardOrder(int order) {
 		return String.format("shardId-%012d", order);
 	}
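
For quick reference, the format documented in the new javadoc above produces zero-padded
twelve-digit ids; the sample values below are illustrative only:

	// "shardId-%012d" zero-pads the shard order to twelve digits.
	KinesisShardIdGenerator.generateFromShardOrder(0);  // -> "shardId-000000000000"
	KinesisShardIdGenerator.generateFromShardOrder(42); // -> "shardId-000000000042"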

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java
index 80ad06c..6c91eaf 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java
@@ -20,12 +20,16 @@ package org.apache.flink.streaming.connectors.kinesis.testutils;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
+
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import java.util.Properties;
 
+/**
+ * Extension of the {@link FlinkKinesisConsumer} for testing.
+ */
 public class TestableFlinkKinesisConsumer extends FlinkKinesisConsumer<String> {
 
 	private final RuntimeContext mockedRuntimeCtx;

http://git-wip-us.apache.org/repos/asf/flink/blob/b12de1ed/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
index bb644ba..b6f3cbc 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
@@ -27,6 +27,7 @@ import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
+
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -37,6 +38,9 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.atomic.AtomicReference;
 
+/**
+ * Extension of the {@link KinesisDataFetcher} for testing.
+ */
 public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 
 	private static final Object fakeCheckpointLock = new Object();
@@ -45,14 +49,15 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 
 	private OneShotLatch runWaiter;
 
-	public TestableKinesisDataFetcher(List<String> fakeStreams,
-									  Properties fakeConfiguration,
-									  int fakeTotalCountOfSubtasks,
-									  int fakeTndexOfThisSubtask,
-									  AtomicReference<Throwable> thrownErrorUnderTest,
-									  LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
-									  HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
-									  KinesisProxyInterface fakeKinesis) {
+	public TestableKinesisDataFetcher(
+			List<String> fakeStreams,
+			Properties fakeConfiguration,
+			int fakeTotalCountOfSubtasks,
+			int fakeTndexOfThisSubtask,
+			AtomicReference<Throwable> thrownErrorUnderTest,
+			LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
+			HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
+			KinesisProxyInterface fakeKinesis) {
 		super(fakeStreams,
 			getMockedSourceContext(),
 			fakeCheckpointLock,


[16/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-hadoop-compatibility

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-hadoop-compatibility


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fab8fe57
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fab8fe57
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fab8fe57

Branch: refs/heads/master
Commit: fab8fe57ca7808a8c7dfaee1834a0429217942f2
Parents: b12de1e
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:56:53 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:21 2017 +0200

----------------------------------------------------------------------
 .../flink-hadoop-compatibility/pom.xml          |   7 +-
 .../api/java/typeutils/WritableTypeInfo.java    |  23 +++--
 .../typeutils/runtime/WritableComparator.java   |  65 ++++++------
 .../typeutils/runtime/WritableSerializer.java   |  54 +++++-----
 .../flink/hadoopcompatibility/HadoopInputs.java |  28 +++---
 .../flink/hadoopcompatibility/HadoopUtils.java  |   6 +-
 .../mapred/HadoopMapFunction.java               |  59 +++++------
 .../mapred/HadoopReduceCombineFunction.java     |  75 +++++++-------
 .../mapred/HadoopReduceFunction.java            |  67 +++++++------
 .../mapred/wrapper/HadoopOutputCollector.java   |  15 +--
 .../wrapper/HadoopTupleUnwrappingIterator.java  |  32 +++---
 .../scala/HadoopInputs.scala                    |   6 +-
 .../java/typeutils/WritableExtractionTest.java  |  38 ++++---
 .../java/typeutils/WritableInfoParserTest.java  |  10 +-
 .../java/typeutils/WritableTypeInfoTest.java    |  10 +-
 .../typeutils/runtime/StringArrayWritable.java  |  36 ++++---
 .../runtime/WritableComparatorTest.java         |  25 +++--
 .../runtime/WritableComparatorUUIDTest.java     |   3 +
 .../api/java/typeutils/runtime/WritableID.java  |   4 +
 .../runtime/WritableSerializerTest.java         |  28 +++---
 .../runtime/WritableSerializerUUIDTest.java     |   3 +
 .../hadoopcompatibility/HadoopUtilsTest.java    |   4 +
 .../mapred/HadoopMapFunctionITCase.java         |  45 +++++----
 .../mapred/HadoopMapredITCase.java              |   8 +-
 .../HadoopReduceCombineFunctionITCase.java      |  69 +++++++++----
 .../mapred/HadoopReduceFunctionITCase.java      |  61 +++++++----
 .../mapred/HadoopTestData.java                  |  60 +++++------
 .../example/HadoopMapredCompatWordCount.java    |  70 +++++++------
 .../HadoopTupleUnwrappingIteratorTest.java      | 100 ++++++++++---------
 .../mapreduce/HadoopInputOutputITCase.java      |  16 ++-
 .../mapreduce/example/WordCount.java            |  49 ++++-----
 31 files changed, 604 insertions(+), 472 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/pom.xml b/flink-connectors/flink-hadoop-compatibility/pom.xml
index 2dee17d..9427e43 100644
--- a/flink-connectors/flink-hadoop-compatibility/pom.xml
+++ b/flink-connectors/flink-hadoop-compatibility/pom.xml
@@ -19,9 +19,9 @@ under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-	
+
 	<modelVersion>4.0.0</modelVersion>
-	
+
 	<parent>
 		<groupId>org.apache.flink</groupId>
 		<artifactId>flink-connectors</artifactId>
@@ -82,9 +82,8 @@ under the License.
 			<scope>test</scope>
 			<type>test-jar</type>
 		</dependency>
-		
-	</dependencies>
 
+	</dependencies>
 
 	<build>
 		<plugins>

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java
index 7bcb4bf..cde309b 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java
@@ -28,6 +28,7 @@ import org.apache.flink.api.common.typeutils.TypeComparator;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
 import org.apache.flink.api.java.typeutils.runtime.WritableComparator;
 import org.apache.flink.api.java.typeutils.runtime.WritableSerializer;
+
 import org.apache.hadoop.io.Writable;
 
 import static org.apache.flink.util.Preconditions.checkArgument;
@@ -41,9 +42,9 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  */
 @Public
 public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> implements AtomicType<T> {
-	
+
 	private static final long serialVersionUID = 1L;
-	
+
 	private final Class<T> typeClass;
 
 	@PublicEvolving
@@ -59,11 +60,11 @@ public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> imp
 	@Override
 	@PublicEvolving
 	public TypeComparator<T> createComparator(boolean sortOrderAscending, ExecutionConfig executionConfig) {
-		if(Comparable.class.isAssignableFrom(typeClass)) {
+		if (Comparable.class.isAssignableFrom(typeClass)) {
 			return new WritableComparator(sortOrderAscending, typeClass);
 		}
 		else {
-			throw new UnsupportedOperationException("Cannot create Comparator for "+typeClass.getCanonicalName()+". " +
+			throw new UnsupportedOperationException("Cannot create Comparator for " + typeClass.getCanonicalName() + ". " +
 													"Class does not implement Comparable interface.");
 		}
 	}
@@ -85,7 +86,7 @@ public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> imp
 	public int getArity() {
 		return 1;
 	}
-	
+
 	@Override
 	@PublicEvolving
 	public int getTotalFields() {
@@ -109,17 +110,17 @@ public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> imp
 	public TypeSerializer<T> createSerializer(ExecutionConfig executionConfig) {
 		return new WritableSerializer<T>(typeClass);
 	}
-	
+
 	@Override
 	public String toString() {
 		return "WritableType<" + typeClass.getName() + ">";
-	}	
-	
+	}
+
 	@Override
 	public int hashCode() {
 		return typeClass.hashCode();
 	}
-	
+
 	@Override
 	public boolean equals(Object obj) {
 		if (obj instanceof WritableTypeInfo) {
@@ -138,7 +139,7 @@ public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> imp
 	public boolean canEqual(Object obj) {
 		return obj instanceof WritableTypeInfo;
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 
 	@PublicEvolving
@@ -150,5 +151,5 @@ public class WritableTypeInfo<T extends Writable> extends TypeInformation<T> imp
 			throw new InvalidTypesException("The given class is no subclass of " + Writable.class.getName());
 		}
 	}
-	
+
 }
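
As a minimal sketch (not part of this commit), a user-defined key type that satisfies the
Comparable requirement enforced by createComparator() above could look as follows; the class
and field names are made up for illustration:

	import org.apache.hadoop.io.Writable;

	import java.io.DataInput;
	import java.io.DataOutput;
	import java.io.IOException;

	// Implements both Writable and Comparable, so WritableTypeInfo can create a
	// WritableComparator for it and the type can be used as a grouping/sorting key.
	public class MyWritableKey implements Writable, Comparable<MyWritableKey> {

		private long id;

		@Override
		public void write(DataOutput out) throws IOException {
			out.writeLong(id);
		}

		@Override
		public void readFields(DataInput in) throws IOException {
			id = in.readLong();
		}

		@Override
		public int compareTo(MyWritableKey other) {
			return Long.compare(this.id, other.id);
		}

		@Override
		public int hashCode() {
			return (int) (id ^ (id >>> 32));
		}

		@Override
		public boolean equals(Object obj) {
			return obj instanceof MyWritableKey && ((MyWritableKey) obj).id == this.id;
		}
	}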

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
index 3a95d94..083a56f 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
@@ -18,30 +18,35 @@
 
 package org.apache.flink.api.java.typeutils.runtime;
 
-import com.esotericsoftware.kryo.Kryo;
 import org.apache.flink.api.common.typeutils.TypeComparator;
 import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 import org.apache.flink.core.memory.MemorySegment;
 import org.apache.flink.types.NormalizableKey;
 import org.apache.flink.util.InstantiationUtil;
+
+import com.esotericsoftware.kryo.Kryo;
 import org.apache.hadoop.io.Writable;
 import org.objenesis.strategy.StdInstantiatorStrategy;
 
 import java.io.IOException;
 
+/**
+ * A {@link TypeComparator} for {@link Writable}.
+ * @param <T>
+ */
 public class WritableComparator<T extends Writable & Comparable<T>> extends TypeComparator<T> {
-	
+
 	private static final long serialVersionUID = 1L;
-	
+
 	private Class<T> type;
-	
+
 	private final boolean ascendingComparison;
-	
+
 	private transient T reference;
-	
+
 	private transient T tempReference;
-	
+
 	private transient Kryo kryo;
 
 	@SuppressWarnings("rawtypes")
@@ -51,78 +56,78 @@ public class WritableComparator<T extends Writable & Comparable<T>> extends Type
 		this.type = type;
 		this.ascendingComparison = ascending;
 	}
-	
+
 	@Override
 	public int hash(T record) {
 		return record.hashCode();
 	}
-	
+
 	@Override
 	public void setReference(T toCompare) {
 		checkKryoInitialized();
 
 		reference = KryoUtils.copy(toCompare, kryo, new WritableSerializer<T>(type));
 	}
-	
+
 	@Override
 	public boolean equalToReference(T candidate) {
 		return candidate.equals(reference);
 	}
-	
+
 	@Override
 	public int compareToReference(TypeComparator<T> referencedComparator) {
 		T otherRef = ((WritableComparator<T>) referencedComparator).reference;
 		int comp = otherRef.compareTo(reference);
 		return ascendingComparison ? comp : -comp;
 	}
-	
+
 	@Override
 	public int compare(T first, T second) {
 		int comp = first.compareTo(second);
 		return ascendingComparison ? comp : -comp;
 	}
-	
+
 	@Override
 	public int compareSerialized(DataInputView firstSource, DataInputView secondSource) throws IOException {
 		ensureReferenceInstantiated();
 		ensureTempReferenceInstantiated();
-		
+
 		reference.readFields(firstSource);
 		tempReference.readFields(secondSource);
-		
+
 		int comp = reference.compareTo(tempReference);
 		return ascendingComparison ? comp : -comp;
 	}
-	
+
 	@Override
 	public boolean supportsNormalizedKey() {
 		return NormalizableKey.class.isAssignableFrom(type);
 	}
-	
+
 	@Override
 	public int getNormalizeKeyLen() {
 		ensureReferenceInstantiated();
-		
+
 		NormalizableKey<?> key = (NormalizableKey<?>) reference;
 		return key.getMaxNormalizedKeyLen();
 	}
-	
+
 	@Override
 	public boolean isNormalizedKeyPrefixOnly(int keyBytes) {
 		return keyBytes < getNormalizeKeyLen();
 	}
-	
+
 	@Override
 	public void putNormalizedKey(T record, MemorySegment target, int offset, int numBytes) {
 		NormalizableKey<?> key = (NormalizableKey<?>) record;
 		key.copyNormalizedKey(target, offset, numBytes);
 	}
-	
+
 	@Override
 	public boolean invertNormalizedKey() {
 		return !ascendingComparison;
 	}
-	
+
 	@Override
 	public TypeComparator<T> duplicate() {
 		return new WritableComparator<T>(ascendingComparison, type);
@@ -139,28 +144,28 @@ public class WritableComparator<T extends Writable & Comparable<T>> extends Type
 	public TypeComparator[] getFlatComparators() {
 		return comparators;
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
 	// unsupported normalization
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public boolean supportsSerializationWithKeyNormalization() {
 		return false;
 	}
-	
+
 	@Override
 	public void writeWithKeyNormalization(T record, DataOutputView target) throws IOException {
 		throw new UnsupportedOperationException();
 	}
-	
+
 	@Override
 	public T readWithKeyDenormalization(T reuse, DataInputView source) throws IOException {
 		throw new UnsupportedOperationException();
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	private void checkKryoInitialized() {
 		if (this.kryo == null) {
 			this.kryo = new Kryo();
@@ -173,13 +178,13 @@ public class WritableComparator<T extends Writable & Comparable<T>> extends Type
 			this.kryo.register(type);
 		}
 	}
-	
+
 	private void ensureReferenceInstantiated() {
 		if (reference == null) {
 			reference = InstantiationUtil.instantiate(type, Writable.class);
 		}
 	}
-	
+
 	private void ensureTempReferenceInstantiated() {
 		if (tempReference == null) {
 			tempReference = InstantiationUtil.instantiate(type, Writable.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
index 421d7a3..161e65b 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
@@ -18,8 +18,6 @@
 
 package org.apache.flink.api.java.typeutils.runtime;
 
-
-import com.esotericsoftware.kryo.Kryo;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.typeutils.CompatibilityResult;
 import org.apache.flink.api.common.typeutils.GenericTypeSerializerConfigSnapshot;
@@ -28,98 +26,102 @@ import org.apache.flink.api.common.typeutils.TypeSerializerConfigSnapshot;
 import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 import org.apache.flink.util.InstantiationUtil;
+
+import com.esotericsoftware.kryo.Kryo;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.objenesis.strategy.StdInstantiatorStrategy;
 
 import java.io.IOException;
 
+/**
+ * A {@link TypeSerializer} for {@link Writable}.
+ * @param <T>
+ */
 @Internal
 public final class WritableSerializer<T extends Writable> extends TypeSerializer<T> {
-	
+
 	private static final long serialVersionUID = 1L;
-	
+
 	private final Class<T> typeClass;
-	
+
 	private transient Kryo kryo;
-	
+
 	private transient T copyInstance;
-	
+
 	public WritableSerializer(Class<T> typeClass) {
 		this.typeClass = typeClass;
 	}
-	
+
 	@SuppressWarnings("unchecked")
 	@Override
 	public T createInstance() {
-		if(typeClass == NullWritable.class) {
+		if (typeClass == NullWritable.class) {
 			return (T) NullWritable.get();
 		}
 		return InstantiationUtil.instantiate(typeClass);
 	}
 
-
-	
 	@Override
 	public T copy(T from) {
 		checkKryoInitialized();
 
 		return KryoUtils.copy(from, kryo, this);
 	}
-	
+
 	@Override
 	public T copy(T from, T reuse) {
 		checkKryoInitialized();
 
 		return KryoUtils.copy(from, reuse, kryo, this);
 	}
-	
+
 	@Override
 	public int getLength() {
 		return -1;
 	}
-	
+
 	@Override
 	public void serialize(T record, DataOutputView target) throws IOException {
 		record.write(target);
 	}
-	
+
 	@Override
 	public T deserialize(DataInputView source) throws IOException {
 		return deserialize(createInstance(), source);
 	}
-	
+
 	@Override
 	public T deserialize(T reuse, DataInputView source) throws IOException {
 		reuse.readFields(source);
 		return reuse;
 	}
-	
+
 	@Override
 	public void copy(DataInputView source, DataOutputView target) throws IOException {
 		ensureInstanceInstantiated();
 		copyInstance.readFields(source);
 		copyInstance.write(target);
 	}
-	
+
 	@Override
 	public boolean isImmutableType() {
 		return false;
 	}
-	
+
 	@Override
 	public WritableSerializer<T> duplicate() {
 		return new WritableSerializer<T>(typeClass);
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	private void ensureInstanceInstantiated() {
 		if (copyInstance == null) {
 			copyInstance = createInstance();
 		}
 	}
-	
+
 	private void checkKryoInitialized() {
 		if (this.kryo == null) {
 			this.kryo = new Kryo();
@@ -133,12 +135,12 @@ public final class WritableSerializer<T extends Writable> extends TypeSerializer
 		}
 	}
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Override
 	public int hashCode() {
 		return this.typeClass.hashCode();
 	}
-	
+
 	@Override
 	public boolean equals(Object obj) {
 		if (obj instanceof WritableSerializer) {
@@ -175,6 +177,10 @@ public final class WritableSerializer<T extends Writable> extends TypeSerializer
 		}
 	}
 
+	/**
+	 * The config snapshot for this serializer.
+	 * @param <T>
+	 */
 	public static final class WritableSerializerConfigSnapshot<T extends Writable>
 			extends GenericTypeSerializerConfigSnapshot<T> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
index 9e8a3e4..dd5a74f 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
@@ -20,6 +20,7 @@ package org.apache.flink.hadoopcompatibility;
 
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.java.hadoop.mapred.HadoopInputFormat;
+
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.Job;
 
@@ -28,10 +29,10 @@ import java.io.IOException;
 /**
  * HadoopInputs is a utility class to use Apache Hadoop InputFormats with Apache Flink.
  *
- * It provides methods to create Flink InputFormat wrappers for Hadoop {@link org.apache.hadoop.mapred.InputFormat}
+ * <p>It provides methods to create Flink InputFormat wrappers for Hadoop {@link org.apache.hadoop.mapred.InputFormat}
  * and {@link org.apache.hadoop.mapreduce.InputFormat}.
  *
- * Key value pairs produced by the Hadoop InputFormats are converted into Flink
+ * <p>Key value pairs produced by the Hadoop InputFormats are converted into Flink
  * {@link org.apache.flink.api.java.tuple.Tuple2 Tuple2} objects where the first field
  * ({@link org.apache.flink.api.java.tuple.Tuple2#f0 Tuple2.f0}) is the key and the second field
  * ({@link org.apache.flink.api.java.tuple.Tuple2#f1 Tuple2.f1}) is the value.
@@ -46,7 +47,7 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
 	 */
-	public static <K,V> HadoopInputFormat<K, V> readHadoopFile(org.apache.hadoop.mapred.FileInputFormat<K,V> mapredInputFormat, Class<K> key, Class<V> value, String inputPath, JobConf job) {
+	public static <K, V> HadoopInputFormat<K, V> readHadoopFile(org.apache.hadoop.mapred.FileInputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, String inputPath, JobConf job) {
 		// set input path in JobConf
 		org.apache.hadoop.mapred.FileInputFormat.addInputPath(job, new org.apache.hadoop.fs.Path(inputPath));
 		// return wrapping InputFormat
@@ -58,7 +59,7 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
 	 */
-	public static <K,V> HadoopInputFormat<K, V> readHadoopFile(org.apache.hadoop.mapred.FileInputFormat<K,V> mapredInputFormat, Class<K> key, Class<V> value, String inputPath) {
+	public static <K, V> HadoopInputFormat<K, V> readHadoopFile(org.apache.hadoop.mapred.FileInputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, String inputPath) {
 		return readHadoopFile(mapredInputFormat, key, value, inputPath, new JobConf());
 	}
 
@@ -67,7 +68,7 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps a Hadoop SequenceFileInputFormat.
 	 */
-	public static <K,V> HadoopInputFormat<K, V> readSequenceFile(Class<K> key, Class<V> value, String inputPath) throws IOException {
+	public static <K, V> HadoopInputFormat<K, V> readSequenceFile(Class<K> key, Class<V> value, String inputPath) throws IOException {
 		return readHadoopFile(new org.apache.hadoop.mapred.SequenceFileInputFormat<K, V>(), key, value, inputPath);
 	}
 
@@ -76,7 +77,7 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop InputFormat.
 	 */
-	public static <K,V> HadoopInputFormat<K, V> createHadoopInput(org.apache.hadoop.mapred.InputFormat<K,V> mapredInputFormat, Class<K> key, Class<V> value, JobConf job) {
+	public static <K, V> HadoopInputFormat<K, V> createHadoopInput(org.apache.hadoop.mapred.InputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, JobConf job) {
 		return new HadoopInputFormat<>(mapredInputFormat, key, value, job);
 	}
 
@@ -85,9 +86,8 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
 	 */
-	public static <K,V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> readHadoopFile(
-			org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K,V> mapreduceInputFormat, Class<K> key, Class<V> value, String inputPath, Job job) throws IOException
-	{
+	public static <K, V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> readHadoopFile(
+			org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, String inputPath, Job job) throws IOException {
 		// set input path in Job
 		org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, new org.apache.hadoop.fs.Path(inputPath));
 		// return wrapping InputFormat
@@ -99,9 +99,8 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
 	 */
-	public static <K,V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> readHadoopFile(
-			org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K,V> mapreduceInputFormat, Class<K> key, Class<V> value, String inputPath) throws IOException
-	{
+	public static <K, V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> readHadoopFile(
+			org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, String inputPath) throws IOException {
 		return readHadoopFile(mapreduceInputFormat, key, value, inputPath, Job.getInstance());
 	}
 
@@ -110,9 +109,8 @@ public final class HadoopInputs {
 	 *
 	 * @return A Flink InputFormat that wraps the Hadoop InputFormat.
 	 */
-	public static <K,V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> createHadoopInput(
-			org.apache.hadoop.mapreduce.InputFormat<K,V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job)
-	{
+	public static <K, V> org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> createHadoopInput(
+			org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
 		return new org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<>(mapreduceInputFormat, key, value, job);
 	}
 }
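
A hedged usage sketch of the utility above (the path and key/value types are assumptions,
not taken from this commit):

	import org.apache.flink.api.java.DataSet;
	import org.apache.flink.api.java.ExecutionEnvironment;
	import org.apache.flink.api.java.tuple.Tuple2;
	import org.apache.flink.hadoopcompatibility.HadoopInputs;

	import org.apache.hadoop.io.LongWritable;
	import org.apache.hadoop.io.Text;

	public class ReadSequenceFileExample {
		public static void main(String[] args) throws Exception {
			ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

			// Keys land in Tuple2.f0 and values in Tuple2.f1, as described in the class javadoc.
			DataSet<Tuple2<LongWritable, Text>> input = env.createInput(
				HadoopInputs.readSequenceFile(LongWritable.class, Text.class, "hdfs:///tmp/input.seq"));

			input.first(5).print();
		}
	}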

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
index 97ca329..738e2f8 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
@@ -18,8 +18,9 @@
 
 package org.apache.flink.hadoopcompatibility;
 
-import org.apache.commons.cli.Option;
 import org.apache.flink.api.java.utils.ParameterTool;
+
+import org.apache.commons.cli.Option;
 import org.apache.hadoop.util.GenericOptionsParser;
 
 import java.io.IOException;
@@ -31,7 +32,7 @@ import java.util.Map;
  */
 public class HadoopUtils {
 	/**
-	 * Returns {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}
+	 * Returns {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}.
 	 *
 	 * @param args Input array arguments. It should be parsable by {@link GenericOptionsParser}
 	 * @return A {@link ParameterTool}
@@ -49,4 +50,3 @@ public class HadoopUtils {
 	}
 }
 
-

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
index ba8aa90..5b679fe 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopMapFunction.java
@@ -18,68 +18,69 @@
 
 package org.apache.flink.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-
 import org.apache.flink.annotation.Public;
 import org.apache.flink.api.common.functions.RichFlatMapFunction;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector;
 import org.apache.flink.util.Collector;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.Reporter;
 
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+
 /**
- * This wrapper maps a Hadoop Mapper (mapred API) to a Flink FlatMapFunction. 
+ * This wrapper maps a Hadoop Mapper (mapred API) to a Flink FlatMapFunction.
  */
 @SuppressWarnings("rawtypes")
 @Public
-public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
-					extends RichFlatMapFunction<Tuple2<KEYIN,VALUEIN>, Tuple2<KEYOUT,VALUEOUT>> 
-					implements ResultTypeQueryable<Tuple2<KEYOUT,VALUEOUT>>, Serializable {
+public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
+					extends RichFlatMapFunction<Tuple2<KEYIN, VALUEIN>, Tuple2<KEYOUT, VALUEOUT>>
+					implements ResultTypeQueryable<Tuple2<KEYOUT, VALUEOUT>>, Serializable {
 
 	private static final long serialVersionUID = 1L;
 
-	private transient Mapper<KEYIN,VALUEIN,KEYOUT,VALUEOUT> mapper;
+	private transient Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapper;
 	private transient JobConf jobConf;
 
-	private transient HadoopOutputCollector<KEYOUT,VALUEOUT> outputCollector;
+	private transient HadoopOutputCollector<KEYOUT, VALUEOUT> outputCollector;
 	private transient Reporter reporter;
-	
+
 	/**
 	 * Maps a Hadoop Mapper (mapred API) to a Flink FlatMapFunction.
-	 * 
+	 *
 	 * @param hadoopMapper The Hadoop Mapper to wrap.
 	 */
 	public HadoopMapFunction(Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopMapper) {
 		this(hadoopMapper, new JobConf());
 	}
-	
+
 	/**
 	 * Maps a Hadoop Mapper (mapred API) to a Flink FlatMapFunction.
 	 * The Hadoop Mapper is configured with the provided JobConf.
-	 * 
+	 *
 	 * @param hadoopMapper The Hadoop Mapper to wrap.
 	 * @param conf The JobConf that is used to configure the Hadoop Mapper.
 	 */
 	public HadoopMapFunction(Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopMapper, JobConf conf) {
-		if(hadoopMapper == null) {
+		if (hadoopMapper == null) {
 			throw new NullPointerException("Mapper may not be null.");
 		}
-		if(conf == null) {
+		if (conf == null) {
 			throw new NullPointerException("JobConf may not be null.");
 		}
-		
+
 		this.mapper = hadoopMapper;
 		this.jobConf = conf;
 	}
@@ -88,13 +89,13 @@ public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	public void open(Configuration parameters) throws Exception {
 		super.open(parameters);
 		this.mapper.configure(jobConf);
-		
+
 		this.reporter = new HadoopDummyReporter();
 		this.outputCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
 	}
 
 	@Override
-	public void flatMap(final Tuple2<KEYIN,VALUEIN> value, final Collector<Tuple2<KEYOUT,VALUEOUT>> out) 
+	public void flatMap(final Tuple2<KEYIN, VALUEIN> value, final Collector<Tuple2<KEYOUT, VALUEOUT>> out)
 			throws Exception {
 		outputCollector.setFlinkCollector(out);
 		mapper.map(value.f0, value.f1, outputCollector, reporter);
@@ -102,15 +103,15 @@ public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	@SuppressWarnings("unchecked")
 	@Override
-	public TypeInformation<Tuple2<KEYOUT,VALUEOUT>> getProducedType() {	
+	public TypeInformation<Tuple2<KEYOUT, VALUEOUT>> getProducedType() {
 		Class<KEYOUT> outKeyClass = (Class<KEYOUT>) TypeExtractor.getParameterType(Mapper.class, mapper.getClass(), 2);
-		Class<VALUEOUT> outValClass = (Class<VALUEOUT>)TypeExtractor.getParameterType(Mapper.class, mapper.getClass(), 3);
-		
+		Class<VALUEOUT> outValClass = (Class<VALUEOUT>) TypeExtractor.getParameterType(Mapper.class, mapper.getClass(), 3);
+
 		final TypeInformation<KEYOUT> keyTypeInfo = TypeExtractor.getForClass((Class<KEYOUT>) outKeyClass);
 		final TypeInformation<VALUEOUT> valueTypleInfo = TypeExtractor.getForClass((Class<VALUEOUT>) outValClass);
-		return new TupleTypeInfo<Tuple2<KEYOUT,VALUEOUT>>(keyTypeInfo, valueTypleInfo);
+		return new TupleTypeInfo<Tuple2<KEYOUT, VALUEOUT>>(keyTypeInfo, valueTypleInfo);
 	}
-	
+
 	/**
 	 * Custom serialization methods.
 	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
@@ -122,10 +123,10 @@ public final class HadoopMapFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	@SuppressWarnings("unchecked")
 	private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
-		Class<Mapper<KEYIN,VALUEIN,KEYOUT,VALUEOUT>> mapperClass = 
-				(Class<Mapper<KEYIN,VALUEIN,KEYOUT,VALUEOUT>>)in.readObject();
+		Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> mapperClass =
+				(Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
 		mapper = InstantiationUtil.instantiate(mapperClass);
-		
+
 		jobConf = new JobConf();
 		jobConf.readFields(in);
 	}
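
To make the wrapper concrete, a fragment under stated assumptions: Tokenizer is a hypothetical
mapred Mapper, and text is assumed to be a DataSet<Tuple2<LongWritable, Text>> (for instance
obtained as in the HadoopInputs sketch further above); imports from org.apache.hadoop.mapred,
org.apache.hadoop.io and java.io are omitted for brevity:

	// Hypothetical mapred Mapper, defined only to make the wrapping call below concrete.
	public static final class Tokenizer extends MapReduceBase
			implements Mapper<LongWritable, Text, Text, LongWritable> {

		@Override
		public void map(LongWritable key, Text value,
				OutputCollector<Text, LongWritable> out, Reporter reporter) throws IOException {
			for (String token : value.toString().toLowerCase().split("\\W+")) {
				if (!token.isEmpty()) {
					out.collect(new Text(token), new LongWritable(1L));
				}
			}
		}
	}

	// The Hadoop Mapper runs inside a Flink FlatMapFunction.
	DataSet<Tuple2<Text, LongWritable>> words = text.flatMap(
		new HadoopMapFunction<LongWritable, Text, Text, LongWritable>(new Tokenizer()));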

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
index c1acc2b..fd0d37d 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceCombineFunction.java
@@ -18,81 +18,82 @@
 
 package org.apache.flink.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-
 import org.apache.flink.annotation.Public;
 import org.apache.flink.api.common.functions.GroupCombineFunction;
 import org.apache.flink.api.common.functions.RichGroupReduceFunction;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopTupleUnwrappingIterator;
 import org.apache.flink.util.Collector;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+
 /**
  * This wrapper maps a Hadoop Reducer and Combiner (mapred API) to a combinable Flink GroupReduceFunction.
  */
 @SuppressWarnings("rawtypes")
 @Public
-public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
-	extends RichGroupReduceFunction<Tuple2<KEYIN,VALUEIN>,Tuple2<KEYOUT,VALUEOUT>>
-	implements GroupCombineFunction<Tuple2<KEYIN,VALUEIN>, Tuple2<KEYIN,VALUEIN>>,
-				ResultTypeQueryable<Tuple2<KEYOUT,VALUEOUT>>, Serializable {
+public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
+	extends RichGroupReduceFunction<Tuple2<KEYIN, VALUEIN>, Tuple2<KEYOUT, VALUEOUT>>
+	implements GroupCombineFunction<Tuple2<KEYIN, VALUEIN>, Tuple2<KEYIN, VALUEIN>>,
+				ResultTypeQueryable<Tuple2<KEYOUT, VALUEOUT>>, Serializable {
 
 	private static final long serialVersionUID = 1L;
 
-	private transient Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT> reducer;
-	private transient Reducer<KEYIN,VALUEIN,KEYIN,VALUEIN> combiner;
+	private transient Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer;
+	private transient Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN> combiner;
 	private transient JobConf jobConf;
-	
+
 	private transient HadoopTupleUnwrappingIterator<KEYIN, VALUEIN> valueIterator;
-	private transient HadoopOutputCollector<KEYOUT,VALUEOUT> reduceCollector;
-	private transient HadoopOutputCollector<KEYIN,VALUEIN> combineCollector;
+	private transient HadoopOutputCollector<KEYOUT, VALUEOUT> reduceCollector;
+	private transient HadoopOutputCollector<KEYIN, VALUEIN> combineCollector;
 	private transient Reporter reporter;
 
 	/**
 	 * Maps two Hadoop Reducer (mapred API) to a combinable Flink GroupReduceFunction.
-	 * 
+	 *
 	 * @param hadoopReducer The Hadoop Reducer that is mapped to a GroupReduceFunction.
 	 * @param hadoopCombiner The Hadoop Reducer that is mapped to the combiner function.
 	 */
 	public HadoopReduceCombineFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer,
-										Reducer<KEYIN,VALUEIN,KEYIN,VALUEIN> hadoopCombiner) {
+										Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN> hadoopCombiner) {
 		this(hadoopReducer, hadoopCombiner, new JobConf());
 	}
-	
+
 	/**
 	 * Maps two Hadoop Reducer (mapred API) to a combinable Flink GroupReduceFunction.
-	 * 
+	 *
 	 * @param hadoopReducer The Hadoop Reducer that is mapped to a GroupReduceFunction.
 	 * @param hadoopCombiner The Hadoop Reducer that is mapped to the combiner function.
 	 * @param conf The JobConf that is used to configure both Hadoop Reducers.
 	 */
 	public HadoopReduceCombineFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer,
-								Reducer<KEYIN,VALUEIN,KEYIN,VALUEIN> hadoopCombiner, JobConf conf) {
-		if(hadoopReducer == null) {
+								Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN> hadoopCombiner, JobConf conf) {
+		if (hadoopReducer == null) {
 			throw new NullPointerException("Reducer may not be null.");
 		}
-		if(hadoopCombiner == null) {
+		if (hadoopCombiner == null) {
 			throw new NullPointerException("Combiner may not be null.");
 		}
-		if(conf == null) {
+		if (conf == null) {
 			throw new NullPointerException("JobConf may not be null.");
 		}
-		
+
 		this.reducer = hadoopReducer;
 		this.combiner = hadoopCombiner;
 		this.jobConf = conf;
@@ -104,7 +105,7 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 		super.open(parameters);
 		this.reducer.configure(jobConf);
 		this.combiner.configure(jobConf);
-		
+
 		this.reporter = new HadoopDummyReporter();
 		Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
 		TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
@@ -114,7 +115,7 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	}
 
 	@Override
-	public void reduce(final Iterable<Tuple2<KEYIN,VALUEIN>> values, final Collector<Tuple2<KEYOUT,VALUEOUT>> out)
+	public void reduce(final Iterable<Tuple2<KEYIN, VALUEIN>> values, final Collector<Tuple2<KEYOUT, VALUEOUT>> out)
 			throws Exception {
 		reduceCollector.setFlinkCollector(out);
 		valueIterator.set(values.iterator());
@@ -122,7 +123,7 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	}
 
 	@Override
-	public void combine(final Iterable<Tuple2<KEYIN,VALUEIN>> values, final Collector<Tuple2<KEYIN,VALUEIN>> out) throws Exception {
+	public void combine(final Iterable<Tuple2<KEYIN, VALUEIN>> values, final Collector<Tuple2<KEYIN, VALUEIN>> out) throws Exception {
 		combineCollector.setFlinkCollector(out);
 		valueIterator.set(values.iterator());
 		combiner.reduce(valueIterator.getCurrentKey(), valueIterator, combineCollector, reporter);
@@ -130,9 +131,9 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	@SuppressWarnings("unchecked")
 	@Override
-	public TypeInformation<Tuple2<KEYOUT,VALUEOUT>> getProducedType() {
+	public TypeInformation<Tuple2<KEYOUT, VALUEOUT>> getProducedType() {
 		Class<KEYOUT> outKeyClass = (Class<KEYOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 2);
-		Class<VALUEOUT> outValClass = (Class<VALUEOUT>)TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);
+		Class<VALUEOUT> outValClass = (Class<VALUEOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);
 
 		final TypeInformation<KEYOUT> keyTypeInfo = TypeExtractor.getForClass(outKeyClass);
 		final TypeInformation<VALUEOUT> valueTypleInfo = TypeExtractor.getForClass(outValClass);
@@ -144,7 +145,7 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
 	 */
 	private void writeObject(final ObjectOutputStream out) throws IOException {
-		
+
 		out.writeObject(reducer.getClass());
 		out.writeObject(combiner.getClass());
 		jobConf.write(out);
@@ -152,15 +153,15 @@ public final class HadoopReduceCombineFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	@SuppressWarnings("unchecked")
 	private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
-		
-		Class<Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>> reducerClass = 
-				(Class<Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>>)in.readObject();
+
+		Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
+				(Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
 		reducer = InstantiationUtil.instantiate(reducerClass);
-		
-		Class<Reducer<KEYIN,VALUEIN,KEYIN,VALUEIN>> combinerClass = 
-				(Class<Reducer<KEYIN,VALUEIN,KEYIN,VALUEIN>>)in.readObject();
+
+		Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>> combinerClass =
+				(Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>>) in.readObject();
 		combiner = InstantiationUtil.instantiate(combinerClass);
-		
+
 		jobConf = new JobConf();
 		jobConf.readFields(in);
 	}
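
Continuing the hypothetical word-count fragment from the HadoopMapFunction sketch above
(Counter is an assumed mapred Reducer, not something introduced by this commit):

	// Hypothetical mapred Reducer that sums the per-word counts.
	public static final class Counter extends MapReduceBase
			implements Reducer<Text, LongWritable, Text, LongWritable> {

		@Override
		public void reduce(Text key, Iterator<LongWritable> values,
				OutputCollector<Text, LongWritable> out, Reporter reporter) throws IOException {
			long sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}
			out.collect(key, new LongWritable(sum));
		}
	}

	// The same Hadoop Reducer serves as combiner and reducer; Flink applies the combine
	// phase because HadoopReduceCombineFunction implements GroupCombineFunction.
	DataSet<Tuple2<Text, LongWritable>> counts = words
		.groupBy(0)
		.reduceGroup(new HadoopReduceCombineFunction<Text, LongWritable, Text, LongWritable>(
			new Counter(), new Counter()));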

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
index 55aea24..fadd0b2 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/HadoopReduceFunction.java
@@ -18,70 +18,71 @@
 
 package org.apache.flink.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-
 import org.apache.flink.annotation.Public;
 import org.apache.flink.api.common.functions.RichGroupReduceFunction;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopTupleUnwrappingIterator;
 import org.apache.flink.util.Collector;
 import org.apache.flink.util.InstantiationUtil;
+
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+
 /**
- * This wrapper maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction. 
+ * This wrapper maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction.
  */
 @SuppressWarnings("rawtypes")
 @Public
-public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
-					extends RichGroupReduceFunction<Tuple2<KEYIN,VALUEIN>,Tuple2<KEYOUT,VALUEOUT>> 
-					implements ResultTypeQueryable<Tuple2<KEYOUT,VALUEOUT>>, Serializable {
+public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
+					extends RichGroupReduceFunction<Tuple2<KEYIN, VALUEIN>, Tuple2<KEYOUT, VALUEOUT>>
+					implements ResultTypeQueryable<Tuple2<KEYOUT, VALUEOUT>>, Serializable {
 
 	private static final long serialVersionUID = 1L;
 
-	private transient Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT> reducer;
+	private transient Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer;
 	private transient JobConf jobConf;
-	
+
 	private transient HadoopTupleUnwrappingIterator<KEYIN, VALUEIN> valueIterator;
-	private transient HadoopOutputCollector<KEYOUT,VALUEOUT> reduceCollector;
+	private transient HadoopOutputCollector<KEYOUT, VALUEOUT> reduceCollector;
 	private transient Reporter reporter;
-	
+
 	/**
 	 * Maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction.
- 	 * 
+ 	 *
 	 * @param hadoopReducer The Hadoop Reducer to wrap.
 	 */
 	public HadoopReduceFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer) {
 		this(hadoopReducer, new JobConf());
 	}
-	
+
 	/**
 	 * Maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction.
- 	 * 
+ 	 *
 	 * @param hadoopReducer The Hadoop Reducer to wrap.
 	 * @param conf The JobConf that is used to configure the Hadoop Reducer.
 	 */
 	public HadoopReduceFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer, JobConf conf) {
-		if(hadoopReducer == null) {
+		if (hadoopReducer == null) {
 			throw new NullPointerException("Reducer may not be null.");
 		}
-		if(conf == null) {
+		if (conf == null) {
 			throw new NullPointerException("JobConf may not be null.");
 		}
-		
+
 		this.reducer = hadoopReducer;
 		this.jobConf = conf;
 	}
@@ -91,7 +92,7 @@ public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	public void open(Configuration parameters) throws Exception {
 		super.open(parameters);
 		this.reducer.configure(jobConf);
-		
+
 		this.reporter = new HadoopDummyReporter();
 		this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
 		Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
@@ -100,9 +101,9 @@ public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 	}
 
 	@Override
-	public void reduce(final Iterable<Tuple2<KEYIN,VALUEIN>> values, final Collector<Tuple2<KEYOUT,VALUEOUT>> out)
+	public void reduce(final Iterable<Tuple2<KEYIN, VALUEIN>> values, final Collector<Tuple2<KEYOUT, VALUEOUT>> out)
 			throws Exception {
-		
+
 		reduceCollector.setFlinkCollector(out);
 		valueIterator.set(values.iterator());
 		reducer.reduce(valueIterator.getCurrentKey(), valueIterator, reduceCollector, reporter);
@@ -110,32 +111,32 @@ public final class HadoopReduceFunction<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
 
 	@SuppressWarnings("unchecked")
 	@Override
-	public TypeInformation<Tuple2<KEYOUT,VALUEOUT>> getProducedType() {
+	public TypeInformation<Tuple2<KEYOUT, VALUEOUT>> getProducedType() {
 		Class<KEYOUT> outKeyClass = (Class<KEYOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 2);
-		Class<VALUEOUT> outValClass = (Class<VALUEOUT>)TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);
+		Class<VALUEOUT> outValClass = (Class<VALUEOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);
 
 		final TypeInformation<KEYOUT> keyTypeInfo = TypeExtractor.getForClass((Class<KEYOUT>) outKeyClass);
 		final TypeInformation<VALUEOUT> valueTypleInfo = TypeExtractor.getForClass((Class<VALUEOUT>) outValClass);
-		return new TupleTypeInfo<Tuple2<KEYOUT,VALUEOUT>>(keyTypeInfo, valueTypleInfo);
+		return new TupleTypeInfo<Tuple2<KEYOUT, VALUEOUT>>(keyTypeInfo, valueTypleInfo);
 	}
 
 	/**
-	 * Custom serialization methods
+	 * Custom serialization methods.
 	 * @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
 	 */
 	private void writeObject(final ObjectOutputStream out) throws IOException {
-		
+
 		out.writeObject(reducer.getClass());
-		jobConf.write(out);		
+		jobConf.write(out);
 	}
 
 	@SuppressWarnings("unchecked")
 	private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
-		
-		Class<Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>> reducerClass = 
-				(Class<Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>>)in.readObject();
+
+		Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
+				(Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
 		reducer = InstantiationUtil.instantiate(reducerClass);
-		
+
 		jobConf = new JobConf();
 		jobConf.readFields(in);
 	}
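
And the non-combinable counterpart, reusing the hypothetical Counter reducer from the
previous sketch:

	// Only the reducer is wrapped; no combine phase is applied on the Flink side.
	DataSet<Tuple2<Text, LongWritable>> counts = words
		.groupBy(0)
		.reduceGroup(new HadoopReduceFunction<Text, LongWritable, Text, LongWritable>(new Counter()));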

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
index bfe03d3..ff9e686 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
@@ -20,6 +20,7 @@ package org.apache.flink.hadoopcompatibility.mapred.wrapper;
 
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.util.Collector;
+
 import org.apache.hadoop.mapred.OutputCollector;
 
 import java.io.IOException;
@@ -28,24 +29,24 @@ import java.io.IOException;
  * A Hadoop OutputCollector that wraps a Flink OutputCollector.
  * On each call of collect() the data is forwarded to the wrapped Flink collector.
  */
-public final class HadoopOutputCollector<KEY,VALUE> implements OutputCollector<KEY,VALUE> {
+public final class HadoopOutputCollector<KEY, VALUE> implements OutputCollector<KEY, VALUE> {
 
-	private Collector<Tuple2<KEY,VALUE>> flinkCollector;
+	private Collector<Tuple2<KEY, VALUE>> flinkCollector;
 
-	private final Tuple2<KEY,VALUE> outTuple = new Tuple2<KEY, VALUE>();
+	private final Tuple2<KEY, VALUE> outTuple = new Tuple2<KEY, VALUE>();
 
 	/**
 	 * Set the wrapped Flink collector.
-	 * 
+	 *
 	 * @param flinkCollector The wrapped Flink OutputCollector.
 	 */
 	public void setFlinkCollector(Collector<Tuple2<KEY, VALUE>> flinkCollector) {
 		this.flinkCollector = flinkCollector;
 	}
-	
+
 	/**
-	 * Use the wrapped Flink collector to collect a key-value pair for Flink. 
-	 * 
+	 * Use the wrapped Flink collector to collect a key-value pair for Flink.
+	 *
 	 * @param key the key to collect
 	 * @param val the value to collect
 	 * @throws IOException unexpected of key or value in key-value pair.
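
(Illustrative sketch, not part of the diff above.) The class comment describes forwarding Hadoop-style collect() calls to a wrapped Flink Collector; a minimal wiring of the wrapper could look as follows, assuming its implicit no-arg constructor and Hadoop's Text/IntWritable types — everything outside the diff (the printing collector, the sample record) is a placeholder:

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector;
import org.apache.flink.util.Collector;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

import java.io.IOException;

public class HadoopOutputCollectorSketch {

	public static void main(String[] args) throws IOException {
		// A trivial Flink Collector that just prints whatever it receives.
		Collector<Tuple2<Text, IntWritable>> flinkCollector = new Collector<Tuple2<Text, IntWritable>>() {
			@Override
			public void collect(Tuple2<Text, IntWritable> record) {
				System.out.println(record.f0 + " -> " + record.f1);
			}

			@Override
			public void close() {}
		};

		// The wrapper turns Hadoop-style collect(key, value) calls into Tuple2 emissions.
		HadoopOutputCollector<Text, IntWritable> hadoopCollector = new HadoopOutputCollector<>();
		hadoopCollector.setFlinkCollector(flinkCollector);
		hadoopCollector.collect(new Text("bananas"), new IntWritable(1)); // prints "bananas -> 1"
	}
}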

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIterator.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIterator.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIterator.java
index 2d204b8..c58b5df 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIterator.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIterator.java
@@ -18,26 +18,26 @@
 
 package org.apache.flink.hadoopcompatibility.mapred.wrapper;
 
-import java.util.Iterator;
-
 import org.apache.flink.api.common.typeutils.TypeSerializer;
 import org.apache.flink.api.java.operators.translation.TupleUnwrappingIterator;
 import org.apache.flink.api.java.tuple.Tuple2;
 
+import java.util.Iterator;
+
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
  * Wraps a Flink Tuple2 (key-value-pair) iterator into an iterator over the second (value) field.
  */
-public class HadoopTupleUnwrappingIterator<KEY,VALUE> 
+public class HadoopTupleUnwrappingIterator<KEY, VALUE>
 		extends TupleUnwrappingIterator<VALUE, KEY> implements java.io.Serializable {
 
 	private static final long serialVersionUID = 1L;
 
 	private final TypeSerializer<KEY> keySerializer;
 
-	private transient Iterator<Tuple2<KEY,VALUE>> iterator;
-	
+	private transient Iterator<Tuple2<KEY, VALUE>> iterator;
+
 	private transient KEY curKey;
 	private transient VALUE firstValue;
 	private transient boolean atFirst;
@@ -45,16 +45,16 @@ public class HadoopTupleUnwrappingIterator<KEY,VALUE>
 	public HadoopTupleUnwrappingIterator(TypeSerializer<KEY> keySerializer) {
 		this.keySerializer = checkNotNull(keySerializer);
 	}
-	
+
 	/**
 	 * Set the Flink iterator to wrap.
-	 * 
+	 *
 	 * @param iterator The Flink iterator to wrap.
 	 */
 	@Override
-	public void set(final Iterator<Tuple2<KEY,VALUE>> iterator) {
+	public void set(final Iterator<Tuple2<KEY, VALUE>> iterator) {
 		this.iterator = iterator;
-		if(this.hasNext()) {
+		if (this.hasNext()) {
 			final Tuple2<KEY, VALUE> tuple = iterator.next();
 			this.curKey = keySerializer.copy(tuple.f0);
 			this.firstValue = tuple.f1;
@@ -63,30 +63,30 @@ public class HadoopTupleUnwrappingIterator<KEY,VALUE>
 			this.atFirst = false;
 		}
 	}
-	
+
 	@Override
 	public boolean hasNext() {
-		if(this.atFirst) {
+		if (this.atFirst) {
 			return true;
 		}
 		return iterator.hasNext();
 	}
-	
+
 	@Override
 	public VALUE next() {
-		if(this.atFirst) {
+		if (this.atFirst) {
 			this.atFirst = false;
 			return firstValue;
 		}
-		
+
 		final Tuple2<KEY, VALUE> tuple = iterator.next();
 		return tuple.f1;
 	}
-	
+
 	public KEY getCurrentKey() {
 		return this.curKey;
 	}
-	
+
 	@Override
 	public void remove() {
 		throw new UnsupportedOperationException();
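
(Illustrative sketch, not part of the diff above.) The iterator wraps a Tuple2 iterator and exposes only the value field, while the key of the first pair is copied via the key serializer; a minimal usage could look as follows, assuming Flink's IntSerializer for the key — the sample data is a placeholder:

import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopTupleUnwrappingIterator;

import java.util.Arrays;
import java.util.List;

public class TupleUnwrappingSketch {

	public static void main(String[] args) {
		List<Tuple2<Integer, String>> pairs = Arrays.asList(
				new Tuple2<Integer, String>(1, "a"),
				new Tuple2<Integer, String>(1, "b"));

		HadoopTupleUnwrappingIterator<Integer, String> values =
				new HadoopTupleUnwrappingIterator<>(IntSerializer.INSTANCE);
		values.set(pairs.iterator());

		System.out.println(values.getCurrentKey()); // 1, copied from the first pair by the key serializer
		while (values.hasNext()) {
			System.out.println(values.next());      // "a", then "b"
		}
	}
}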

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala b/flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala
index 133a5f4..a59af64 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.hadoopcompatibility.scala
 
 import org.apache.flink.api.common.typeinfo.TypeInformation
-import org.apache.flink.api.scala.hadoop.mapreduce
-import org.apache.flink.api.scala.hadoop.mapred
+import org.apache.flink.api.scala.hadoop.{mapred, mapreduce}
 import org.apache.hadoop.fs.{Path => HadoopPath}
 import org.apache.hadoop.mapred.{JobConf, FileInputFormat => MapredFileInputFormat, InputFormat => MapredInputFormat}
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => MapreduceFileInputFormat}
@@ -81,7 +81,7 @@ object HadoopInputs {
       key,
       value,
       inputPath
-    )
+   )
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableExtractionTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableExtractionTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableExtractionTest.java
index 2aefd9f..1fb3407 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableExtractionTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableExtractionTest.java
@@ -25,7 +25,6 @@ import org.apache.flink.api.common.typeinfo.TypeInformation;
 
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparator;
-
 import org.junit.Test;
 
 import java.io.DataInput;
@@ -33,8 +32,14 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.List;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+/**
+ * Tests for the type extraction of {@link Writable}.
+ */
 @SuppressWarnings("serial")
 public class WritableExtractionTest {
 
@@ -64,7 +69,7 @@ public class WritableExtractionTest {
 				TypeExtractor.createHadoopWritableTypeInfo(ViaInterfaceExtension.class);
 		assertEquals(ViaInterfaceExtension.class, info2.getTypeClass());
 
-		TypeInformation<ViaAbstractClassExtension> info3 = 
+		TypeInformation<ViaAbstractClassExtension> info3 =
 				TypeExtractor.createHadoopWritableTypeInfo(ViaAbstractClassExtension.class);
 		assertEquals(ViaAbstractClassExtension.class, info3.getTypeClass());
 	}
@@ -110,7 +115,7 @@ public class WritableExtractionTest {
 			}
 		};
 
-		TypeInformation<DirectWritable> outType = 
+		TypeInformation<DirectWritable> outType =
 				TypeExtractor.getMapReturnTypes(function, new WritableTypeInfo<>(DirectWritable.class));
 
 		assertTrue(outType instanceof WritableTypeInfo);
@@ -119,14 +124,14 @@ public class WritableExtractionTest {
 
 	@Test
 	public void testExtractAsPartOfPojo() {
-		PojoTypeInfo<PojoWithWritable> pojoInfo = 
+		PojoTypeInfo<PojoWithWritable> pojoInfo =
 				(PojoTypeInfo<PojoWithWritable>) TypeExtractor.getForClass(PojoWithWritable.class);
 
 		boolean foundWritable = false;
 		for (int i = 0; i < pojoInfo.getArity(); i++) {
 			PojoField field = pojoInfo.getPojoFieldAt(i);
 			String name = field.getField().getName();
-			
+
 			if (name.equals("hadoopCitizen")) {
 				if (foundWritable) {
 					fail("already seen");
@@ -134,10 +139,10 @@ public class WritableExtractionTest {
 				foundWritable = true;
 				assertEquals(new WritableTypeInfo<>(DirectWritable.class), field.getTypeInformation());
 				assertEquals(DirectWritable.class, field.getTypeInformation().getTypeClass());
-				
+
 			}
 		}
-		
+
 		assertTrue("missed the writable type", foundWritable);
 	}
 
@@ -152,9 +157,9 @@ public class WritableExtractionTest {
 		};
 
 		@SuppressWarnings("unchecked")
-		TypeInformation<Writable> inType = 
+		TypeInformation<Writable> inType =
 				(TypeInformation<Writable>) (TypeInformation<?>) new WritableTypeInfo<>(DirectWritable.class);
-		
+
 		try {
 			TypeExtractor.getMapReturnTypes(function, inType);
 			fail("exception expected");
@@ -168,11 +173,11 @@ public class WritableExtractionTest {
 	//  test type classes
 	// ------------------------------------------------------------------------
 
-	public interface ExtendedWritable extends Writable {}
+	private interface ExtendedWritable extends Writable {}
 
-	public static abstract class AbstractWritable implements Writable {}
+	private abstract static class AbstractWritable implements Writable {}
 
-	public static class DirectWritable implements Writable {
+	private static class DirectWritable implements Writable {
 
 		@Override
 		public void write(DataOutput dataOutput) throws IOException {}
@@ -181,7 +186,7 @@ public class WritableExtractionTest {
 		public void readFields(DataInput dataInput) throws IOException {}
 	}
 
-	public static class ViaInterfaceExtension implements ExtendedWritable {
+	private static class ViaInterfaceExtension implements ExtendedWritable {
 
 		@Override
 		public void write(DataOutput dataOutput) throws IOException {}
@@ -190,7 +195,7 @@ public class WritableExtractionTest {
 		public void readFields(DataInput dataInput) throws IOException {}
 	}
 
-	public static class ViaAbstractClassExtension extends AbstractWritable {
+	private static class ViaAbstractClassExtension extends AbstractWritable {
 
 		@Override
 		public void write(DataOutput dataOutput) throws IOException {}
@@ -199,6 +204,9 @@ public class WritableExtractionTest {
 		public void readFields(DataInput dataInput) throws IOException {}
 	}
 
+	/**
+	 * Test Pojo containing a {@link DirectWritable}.
+	 */
 	public static class PojoWithWritable {
 		public String str;
 		public DirectWritable hadoopCitizen;

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableInfoParserTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableInfoParserTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableInfoParserTest.java
index 3d2b652..7262bb7 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableInfoParserTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableInfoParserTest.java
@@ -22,8 +22,8 @@ import org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo;
 import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.hadoop.io.Writable;
 
+import org.apache.hadoop.io.Writable;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -31,6 +31,9 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+/**
+ * Tests for the type information parsing of {@link Writable}.
+ */
 public class WritableInfoParserTest {
 
 	@Test
@@ -66,7 +69,7 @@ public class WritableInfoParserTest {
 	//  Test types
 	// ------------------------------------------------------------------------
 
-	public static class MyWritable implements Writable {
+	private static class MyWritable implements Writable {
 
 		@Override
 		public void write(DataOutput out) throws IOException {}
@@ -75,6 +78,9 @@ public class WritableInfoParserTest {
 		public void readFields(DataInput in) throws IOException {}
 	}
 
+	/**
+	 * Test Pojo containing a {@link Writable}.
+	 */
 	public static class MyPojo {
 		public Integer basic;
 		public Tuple2<String, Integer> tuple;

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableTypeInfoTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableTypeInfoTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableTypeInfoTest.java
index 666ab84..903c856 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableTypeInfoTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/WritableTypeInfoTest.java
@@ -18,11 +18,13 @@
 
 package org.apache.flink.api.java.typeutils;
 
+import org.apache.flink.api.common.typeutils.TypeInformationTestBase;
+
+import org.apache.hadoop.io.Writable;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import org.apache.flink.api.common.typeutils.TypeInformationTestBase;
-import org.apache.hadoop.io.Writable;
 
 /**
  * Test for {@link WritableTypeInfo}.
@@ -41,7 +43,7 @@ public class WritableTypeInfoTest extends TypeInformationTestBase<WritableTypeIn
 	//  test types
 	// ------------------------------------------------------------------------
 
-	public static class TestClass implements Writable {
+	private static class TestClass implements Writable {
 
 		@Override
 		public void write(DataOutput dataOutput) throws IOException {}
@@ -50,7 +52,7 @@ public class WritableTypeInfoTest extends TypeInformationTestBase<WritableTypeIn
 		public void readFields(DataInput dataInput) throws IOException {}
 	}
 
-	public static class AlternateClass implements Writable {
+	private static class AlternateClass implements Writable {
 
 		@Override
 		public void write(DataOutput dataOutput) throws IOException {}

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/StringArrayWritable.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/StringArrayWritable.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/StringArrayWritable.java
index 8c3a8cd..6101c0a 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/StringArrayWritable.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/StringArrayWritable.java
@@ -19,64 +19,68 @@
 package org.apache.flink.api.java.typeutils.runtime;
 
 import org.apache.flink.configuration.ConfigConstants;
+
 import org.apache.hadoop.io.Writable;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+/**
+ * A {@link Writable} and {@link Comparable} wrapper for a string array.
+ */
 public class StringArrayWritable implements Writable, Comparable<StringArrayWritable> {
-	
+
 	private String[] array = new String[0];
-	
+
 	public StringArrayWritable() {
 		super();
 	}
-	
+
 	public StringArrayWritable(String[] array) {
 		this.array = array;
 	}
-	
+
 	@Override
 	public void write(DataOutput out) throws IOException {
 		out.writeInt(this.array.length);
-		
-		for(String str : this.array) {
+
+		for (String str : this.array) {
 			byte[] b = str.getBytes(ConfigConstants.DEFAULT_CHARSET);
 			out.writeInt(b.length);
 			out.write(b);
 		}
 	}
-	
+
 	@Override
 	public void readFields(DataInput in) throws IOException {
 		this.array = new String[in.readInt()];
-		
-		for(int i = 0; i < this.array.length; i++) {
+
+		for (int i = 0; i < this.array.length; i++) {
 			byte[] b = new byte[in.readInt()];
 			in.readFully(b);
 			this.array[i] = new String(b, ConfigConstants.DEFAULT_CHARSET);
 		}
 	}
-	
+
 	@Override
 	public int compareTo(StringArrayWritable o) {
-		if(this.array.length != o.array.length) {
+		if (this.array.length != o.array.length) {
 			return this.array.length - o.array.length;
 		}
-		
-		for(int i = 0; i < this.array.length; i++) {
+
+		for (int i = 0; i < this.array.length; i++) {
 			int comp = this.array[i].compareTo(o.array[i]);
-			if(comp != 0) {
+			if (comp != 0) {
 				return comp;
 			}
 		}
 		return 0;
 	}
-	
+
 	@Override
 	public boolean equals(Object obj) {
-		if(!(obj instanceof StringArrayWritable)) {
+		if (!(obj instanceof StringArrayWritable)) {
 			return false;
 		}
 		return this.compareTo((StringArrayWritable) obj) == 0;
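
(Illustrative sketch, not part of the diff above.) The compareTo() shown orders by array length first and element-wise second, and equals() delegates to it; the class lives in the test sources, so the following is only a small demonstration of that ordering:

import org.apache.flink.api.java.typeutils.runtime.StringArrayWritable;

public class StringArrayWritableSketch {

	public static void main(String[] args) {
		StringArrayWritable aa = new StringArrayWritable(new String[]{"a", "a"});
		StringArrayWritable ab = new StringArrayWritable(new String[]{"a", "b"});
		StringArrayWritable aaa = new StringArrayWritable(new String[]{"a", "a", "a"});

		System.out.println(aa.compareTo(ab) < 0);   // true: same length, "a" < "b" at index 1
		System.out.println(aaa.compareTo(aa) > 0);  // true: longer arrays order after shorter ones
		System.out.println(aa.equals(new StringArrayWritable(new String[]{"a", "a"}))); // true
	}
}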

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorTest.java
index 96f844c..104f754 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorTest.java
@@ -22,30 +22,33 @@ import org.apache.flink.api.common.typeutils.ComparatorTestBase;
 import org.apache.flink.api.common.typeutils.TypeComparator;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
 
+/**
+ * Tests for the {@link WritableComparator}.
+ */
 public class WritableComparatorTest extends ComparatorTestBase<StringArrayWritable> {
-	
+
 	StringArrayWritable[] data = new StringArrayWritable[]{
 			new StringArrayWritable(new String[]{}),
 			new StringArrayWritable(new String[]{""}),
-			new StringArrayWritable(new String[]{"a","a"}),
-			new StringArrayWritable(new String[]{"a","b"}),
-			new StringArrayWritable(new String[]{"c","c"}),
-			new StringArrayWritable(new String[]{"d","f"}),
-			new StringArrayWritable(new String[]{"d","m"}),
-			new StringArrayWritable(new String[]{"z","x"}),
-			new StringArrayWritable(new String[]{"a","a", "a"})
+			new StringArrayWritable(new String[]{"a", "a"}),
+			new StringArrayWritable(new String[]{"a", "b"}),
+			new StringArrayWritable(new String[]{"c", "c"}),
+			new StringArrayWritable(new String[]{"d", "f"}),
+			new StringArrayWritable(new String[]{"d", "m"}),
+			new StringArrayWritable(new String[]{"z", "x"}),
+			new StringArrayWritable(new String[]{"a", "a", "a"})
 	};
-	
+
 	@Override
 	protected TypeComparator<StringArrayWritable> createComparator(boolean ascending) {
 		return new WritableComparator<StringArrayWritable>(ascending, StringArrayWritable.class);
 	}
-	
+
 	@Override
 	protected TypeSerializer<StringArrayWritable> createSerializer() {
 		return new WritableSerializer<StringArrayWritable>(StringArrayWritable.class);
 	}
-	
+
 	@Override
 	protected StringArrayWritable[] getSortedTestData() {
 		return data;

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorUUIDTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorUUIDTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorUUIDTest.java
index 94e759d..f8d86de 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorUUIDTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableComparatorUUIDTest.java
@@ -24,6 +24,9 @@ import org.apache.flink.api.common.typeutils.TypeSerializer;
 
 import java.util.UUID;
 
+/**
+ * Tests for the {@link WritableComparator} with {@link WritableID}.
+ */
 public class WritableComparatorUUIDTest extends ComparatorTestBase<WritableID> {
 	@Override
 	protected TypeComparator<WritableID> createComparator(boolean ascending) {

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableID.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableID.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableID.java
index 4274cf6..47ddf42 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableID.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableID.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.api.java.typeutils.runtime;
 
+import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 
 import java.io.DataInput;
@@ -25,6 +26,9 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.UUID;
 
+/**
+ * Test object that is both {@link Comparable} and {@link Writable}.
+ */
 public class WritableID implements WritableComparable<WritableID> {
 	private UUID uuid;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerTest.java
index bb5f4d4..9779c17 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerTest.java
@@ -22,29 +22,33 @@ import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeutils.SerializerTestInstance;
 import org.apache.flink.api.java.typeutils.TypeExtractor;
 import org.apache.flink.api.java.typeutils.WritableTypeInfo;
+
 import org.junit.Test;
 
+/**
+ * Tests for the {@link WritableSerializer}.
+ */
 public class WritableSerializerTest {
-	
+
 	@Test
 	public void testStringArrayWritable() {
 		StringArrayWritable[] data = new StringArrayWritable[]{
 				new StringArrayWritable(new String[]{}),
 				new StringArrayWritable(new String[]{""}),
-				new StringArrayWritable(new String[]{"a","a"}),
-				new StringArrayWritable(new String[]{"a","b"}),
-				new StringArrayWritable(new String[]{"c","c"}),
-				new StringArrayWritable(new String[]{"d","f"}),
-				new StringArrayWritable(new String[]{"d","m"}),
-				new StringArrayWritable(new String[]{"z","x"}),
-				new StringArrayWritable(new String[]{"a","a", "a"})
+				new StringArrayWritable(new String[]{"a", "a"}),
+				new StringArrayWritable(new String[]{"a", "b"}),
+				new StringArrayWritable(new String[]{"c", "c"}),
+				new StringArrayWritable(new String[]{"d", "f"}),
+				new StringArrayWritable(new String[]{"d", "m"}),
+				new StringArrayWritable(new String[]{"z", "x"}),
+				new StringArrayWritable(new String[]{"a", "a", "a"})
 		};
-		
+
 		WritableTypeInfo<StringArrayWritable> writableTypeInfo = (WritableTypeInfo<StringArrayWritable>) TypeExtractor.getForObject(data[0]);
 		WritableSerializer<StringArrayWritable> writableSerializer = (WritableSerializer<StringArrayWritable>) writableTypeInfo.createSerializer(new ExecutionConfig());
-		
-		SerializerTestInstance<StringArrayWritable> testInstance = new SerializerTestInstance<StringArrayWritable>(writableSerializer,writableTypeInfo.getTypeClass(), -1, data);
-		
+
+		SerializerTestInstance<StringArrayWritable> testInstance = new SerializerTestInstance<StringArrayWritable>(writableSerializer, writableTypeInfo.getTypeClass(), -1, data);
+
 		testInstance.testAll();
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerUUIDTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerUUIDTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerUUIDTest.java
index 2af7730..dca043d 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerUUIDTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializerUUIDTest.java
@@ -23,6 +23,9 @@ import org.apache.flink.api.common.typeutils.TypeSerializer;
 
 import java.util.UUID;
 
+/**
+ * Tests for the {@link WritableSerializer} with {@link WritableID}.
+ */
 public class WritableSerializerUUIDTest extends SerializerTestBase<WritableID> {
 	@Override
 	protected TypeSerializer<WritableID> createSerializer() {

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/hadoopcompatibility/HadoopUtilsTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/hadoopcompatibility/HadoopUtilsTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/hadoopcompatibility/HadoopUtilsTest.java
index 6f7673b..3bda1e6 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/hadoopcompatibility/HadoopUtilsTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/hadoopcompatibility/HadoopUtilsTest.java
@@ -20,10 +20,14 @@ package org.apache.flink.hadoopcompatibility;
 
 import org.apache.flink.api.java.utils.AbstractParameterToolTest;
 import org.apache.flink.api.java.utils.ParameterTool;
+
 import org.junit.Test;
 
 import java.io.IOException;
 
+/**
+ * Tests for the {@link HadoopUtils}.
+ */
 public class HadoopUtilsTest extends AbstractParameterToolTest {
 
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapFunctionITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapFunctionITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapFunctionITCase.java
index 4d1acb4..2fb2f88 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapFunctionITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapFunctionITCase.java
@@ -18,14 +18,13 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.core.fs.FileSystem;
 import org.apache.flink.hadoopcompatibility.mapred.HadoopMapFunction;
 import org.apache.flink.test.util.MultipleProgramsTestBase;
+
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -38,6 +37,11 @@ import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import java.io.IOException;
+
+/**
+ * IT cases for the {@link HadoopMapFunction}.
+ */
 @RunWith(Parameterized.class)
 public class HadoopMapFunctionITCase extends MultipleProgramsTestBase {
 
@@ -124,53 +128,60 @@ public class HadoopMapFunctionITCase extends MultipleProgramsTestBase {
 
 		compareResultsByLinesInMemory(expected, resultPath);
 	}
-	
 
-	
+	/**
+	 * {@link Mapper} that only forwards records containing "bananas".
+	 */
 	public static class NonPassingMapper implements Mapper<IntWritable, Text, IntWritable, Text> {
-		
+
 		@Override
-		public void map(final IntWritable k, final Text v, 
+		public void map(final IntWritable k, final Text v,
 				final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
-			if ( v.toString().contains("bananas") ) {
-				out.collect(k,v);
+			if (v.toString().contains("bananas")) {
+				out.collect(k, v);
 			}
 		}
-		
+
 		@Override
 		public void configure(final JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * {@link Mapper} that duplicates records.
+	 */
 	public static class DuplicatingMapper implements Mapper<IntWritable, Text, IntWritable, Text> {
-		
+
 		@Override
-		public void map(final IntWritable k, final Text v, 
+		public void map(final IntWritable k, final Text v,
 				final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
 			out.collect(k, v);
 			out.collect(k, new Text(v.toString().toUpperCase()));
 		}
-		
+
 		@Override
 		public void configure(final JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * {@link Mapper} that filters records based on a prefix.
+	 */
 	public static class ConfigurableMapper implements Mapper<IntWritable, Text, IntWritable, Text> {
 		private String filterPrefix;
-		
+
 		@Override
 		public void map(IntWritable k, Text v, OutputCollector<IntWritable, Text> out, Reporter r)
 				throws IOException {
-			if(v.toString().startsWith(filterPrefix)) {
+			if (v.toString().startsWith(filterPrefix)) {
 				out.collect(k, v);
 			}
 		}
-		
+
 		@Override
 		public void configure(JobConf c) {
 			filterPrefix = c.get("my.filterPrefix");

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapredITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapredITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapredITCase.java
index 0b5a366..145eaaa 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapredITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopMapredITCase.java
@@ -22,11 +22,15 @@ import org.apache.flink.test.hadoopcompatibility.mapred.example.HadoopMapredComp
 import org.apache.flink.test.testdata.WordCountData;
 import org.apache.flink.test.util.JavaProgramTestBase;
 import org.apache.flink.util.OperatingSystem;
+
 import org.junit.Assume;
 import org.junit.Before;
 
+/**
+ * IT cases for mapred.
+ */
 public class HadoopMapredITCase extends JavaProgramTestBase {
-	
+
 	protected String textPath;
 	protected String resultPath;
 
@@ -47,7 +51,7 @@ public class HadoopMapredITCase extends JavaProgramTestBase {
 	protected void postSubmit() throws Exception {
 		compareResultsByLinesInMemory(WordCountData.COUNTS, resultPath, new String[]{".", "_"});
 	}
-	
+
 	@Override
 	protected void testProgram() throws Exception {
 		HadoopMapredCompatWordCount.main(new String[] { textPath, resultPath });


[12/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-kafka*

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-kafka*


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/28e8043b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/28e8043b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/28e8043b

Branch: refs/heads/master
Commit: 28e8043ba09b47c99439fdb536a4226eccf70c07
Parents: c20b396
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:55:15 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:17 2017 +0200

----------------------------------------------------------------------
 .../flink-connector-kafka-0.10/pom.xml          |   4 +-
 .../connectors/kafka/FlinkKafkaConsumer010.java |  19 +-
 .../connectors/kafka/FlinkKafkaProducer010.java |  60 ++---
 .../kafka/Kafka010AvroTableSource.java          |   8 +-
 .../kafka/Kafka010JsonTableSource.java          |   4 +-
 .../connectors/kafka/Kafka010TableSource.java   |   4 +-
 .../kafka/internal/Kafka010Fetcher.java         |   9 +-
 .../internal/KafkaConsumerCallBridge010.java    |   8 +-
 .../src/main/resources/log4j.properties         |   1 -
 .../kafka/Kafka010AvroTableSourceTest.java      |   6 +-
 .../connectors/kafka/Kafka010FetcherTest.java   |   3 -
 .../connectors/kafka/Kafka010ITCase.java        |  30 ++-
 .../kafka/Kafka010JsonTableSourceTest.java      |   6 +-
 .../kafka/Kafka010ProducerITCase.java           |   5 +-
 .../kafka/KafkaTestEnvironmentImpl.java         |  41 ++-
 .../flink-connector-kafka-0.8/pom.xml           |   3 +-
 .../connectors/kafka/FlinkKafkaConsumer08.java  |  63 +++--
 .../connectors/kafka/FlinkKafkaConsumer081.java |   1 +
 .../connectors/kafka/FlinkKafkaConsumer082.java |   1 +
 .../connectors/kafka/FlinkKafkaProducer.java    |  10 +-
 .../connectors/kafka/FlinkKafkaProducer08.java  |   3 +-
 .../kafka/Kafka08AvroTableSource.java           |   8 +-
 .../connectors/kafka/Kafka08JsonTableSink.java  |   9 +-
 .../kafka/Kafka08JsonTableSource.java           |   4 +-
 .../connectors/kafka/Kafka08TableSource.java    |   4 +-
 .../kafka/internals/ClosableBlockingQueue.java  |  97 ++++---
 .../kafka/internals/Kafka08Fetcher.java         |  70 +++--
 .../kafka/internals/KillerWatchDog.java         |   2 +-
 .../kafka/internals/PartitionInfoFetcher.java   |   3 +-
 .../internals/PeriodicOffsetCommitter.java      |  23 +-
 .../kafka/internals/SimpleConsumerThread.java   |  96 ++++---
 .../kafka/internals/ZookeeperOffsetHandler.java |  24 +-
 .../kafka/Kafka08AvroTableSourceTest.java       |   6 +-
 .../connectors/kafka/Kafka08ITCase.java         |  16 +-
 .../kafka/Kafka08JsonTableSinkTest.java         |   6 +-
 .../kafka/Kafka08JsonTableSourceTest.java       |   8 +-
 .../connectors/kafka/Kafka08ProducerITCase.java |   4 +-
 .../connectors/kafka/KafkaConsumer08Test.java   |  43 ++--
 .../connectors/kafka/KafkaLocalSystemTime.java  |  48 ----
 .../connectors/kafka/KafkaProducerTest.java     |  18 +-
 .../kafka/KafkaShortRetention08ITCase.java      |   7 +-
 .../kafka/KafkaTestEnvironmentImpl.java         |  35 +--
 .../internals/ClosableBlockingQueueTest.java    |  84 +++---
 .../flink-connector-kafka-0.9/pom.xml           |   4 +-
 .../connectors/kafka/FlinkKafkaConsumer09.java  |  30 +--
 .../connectors/kafka/FlinkKafkaProducer09.java  |   3 +-
 .../kafka/Kafka09AvroTableSource.java           |   8 +-
 .../connectors/kafka/Kafka09JsonTableSink.java  |   9 +-
 .../kafka/Kafka09JsonTableSource.java           |   4 +-
 .../connectors/kafka/Kafka09TableSource.java    |   4 +-
 .../connectors/kafka/internal/Handover.java     |  32 +--
 .../kafka/internal/Kafka09Fetcher.java          |  16 +-
 .../kafka/internal/KafkaConsumerCallBridge.java |   8 +-
 .../kafka/internal/KafkaConsumerThread.java     |  35 ++-
 .../src/main/resources/log4j.properties         |   1 -
 .../kafka/Kafka09AvroTableSourceTest.java       |   6 +-
 .../connectors/kafka/Kafka09FetcherTest.java    |  21 +-
 .../connectors/kafka/Kafka09ITCase.java         |   5 +-
 .../kafka/Kafka09JsonTableSinkTest.java         |   6 +-
 .../kafka/Kafka09JsonTableSourceTest.java       |   8 +-
 .../connectors/kafka/Kafka09ProducerITCase.java |   4 +-
 .../kafka/Kafka09SecuredRunITCase.java          |   8 +-
 .../connectors/kafka/KafkaProducerTest.java     |  20 +-
 .../kafka/KafkaTestEnvironmentImpl.java         |  41 +--
 .../connectors/kafka/internal/HandoverTest.java |  10 +-
 .../src/test/resources/log4j-test.properties    |   2 +-
 .../flink-connector-kafka-base/pom.xml          |   5 +-
 .../kafka/FlinkKafkaConsumerBase.java           | 101 ++++----
 .../kafka/FlinkKafkaProducerBase.java           |  44 ++--
 .../connectors/kafka/KafkaAvroTableSource.java  |  16 +-
 .../connectors/kafka/KafkaJsonTableSink.java    |   7 +-
 .../connectors/kafka/KafkaJsonTableSource.java  |   4 +-
 .../connectors/kafka/KafkaTableSink.java        |  11 +-
 .../connectors/kafka/KafkaTableSource.java      |   3 +-
 .../kafka/config/OffsetCommitMode.java          |   2 +-
 .../connectors/kafka/config/StartupMode.java    |   9 +-
 .../kafka/internals/AbstractFetcher.java        | 110 ++++----
 .../kafka/internals/ExceptionProxy.java         |  37 +--
 .../kafka/internals/KafkaTopicPartition.java    |  15 +-
 .../internals/KafkaTopicPartitionState.java     |  19 +-
 .../KafkaTopicPartitionStateSentinel.java       |   8 +-
 ...picPartitionStateWithPeriodicWatermarks.java |  23 +-
 ...cPartitionStateWithPunctuatedWatermarks.java |  25 +-
 .../partitioner/FlinkFixedPartitioner.java      |  17 +-
 .../FlinkKafkaDelegatePartitioner.java          |   1 +
 .../AvroRowDeserializationSchema.java           |  21 +-
 .../AvroRowSerializationSchema.java             |  13 +-
 .../JSONDeserializationSchema.java              |   6 +-
 .../JSONKeyValueDeserializationSchema.java      |  16 +-
 .../JsonRowDeserializationSchema.java           |   8 +-
 .../JsonRowSerializationSchema.java             |  11 +-
 .../KeyedDeserializationSchema.java             |   4 +-
 .../KeyedDeserializationSchemaWrapper.java      |   4 +-
 .../serialization/KeyedSerializationSchema.java |  10 +-
 .../KeyedSerializationSchemaWrapper.java        |   3 +-
 ...eInformationKeyValueSerializationSchema.java |  32 +--
 .../kafka/AvroRowDeSerializationSchemaTest.java |  10 +-
 .../kafka/FlinkFixedPartitionerTest.java        | 109 ++++++++
 ...inkKafkaConsumerBaseFrom11MigrationTest.java |   5 +-
 ...inkKafkaConsumerBaseFrom12MigrationTest.java |  31 +--
 .../kafka/FlinkKafkaConsumerBaseTest.java       |  23 +-
 .../kafka/FlinkKafkaProducerBaseTest.java       |  37 +--
 .../kafka/JSONDeserializationSchemaTest.java    |   7 +-
 .../JSONKeyValueDeserializationSchemaTest.java  |   8 +-
 .../kafka/JsonRowDeserializationSchemaTest.java |  10 +-
 .../kafka/JsonRowSerializationSchemaTest.java   |   9 +-
 .../KafkaConsumerPartitionAssignmentTest.java   |   4 +-
 .../connectors/kafka/KafkaConsumerTestBase.java | 256 +++++++++----------
 .../connectors/kafka/KafkaProducerTestBase.java |  15 +-
 .../kafka/KafkaShortRetentionTestBase.java      |  30 +--
 .../kafka/KafkaTableSinkTestBase.java           |  15 +-
 .../kafka/KafkaTableSourceTestBase.java         |  22 +-
 .../connectors/kafka/KafkaTestBase.java         |  19 +-
 .../connectors/kafka/KafkaTestEnvironment.java  |  18 +-
 .../kafka/TestFlinkFixedPartitioner.java        | 104 --------
 .../kafka/internals/AbstractFetcherTest.java    |  24 +-
 .../internals/KafkaTopicPartitionTest.java      |  13 +-
 .../kafka/testutils/AvroTestUtils.java          |  18 +-
 .../kafka/testutils/DataGenerators.java         |  37 +--
 .../kafka/testutils/FailingIdentityMapper.java  |  23 +-
 .../testutils/FakeStandardProducerConfig.java   |   4 +
 .../testutils/JobManagerCommunicationUtils.java |  36 +--
 .../testutils/PartitionValidatingMapper.java    |   8 +-
 .../kafka/testutils/ThrottledMapper.java        |   6 +-
 .../kafka/testutils/Tuple2FlinkPartitioner.java |   4 +-
 .../testutils/ValidatingExactlyOnceSink.java    |  11 +-
 .../testutils/ZooKeeperStringSerializer.java    |   3 +-
 .../src/test/resources/log4j-test.properties    |   1 -
 128 files changed, 1361 insertions(+), 1333 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/pom.xml b/flink-connectors/flink-connector-kafka-0.10/pom.xml
index 231b22e..143cb7f 100644
--- a/flink-connectors/flink-connector-kafka-0.10/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.10/pom.xml
@@ -130,7 +130,7 @@ under the License.
 			<version>${kafka.version}</version>
 			<scope>test</scope>
 		</dependency>
-		
+
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-tests_${scala.binary.version}</artifactId>
@@ -209,5 +209,5 @@ under the License.
 			</plugin>
 		</plugins>
 	</build>
-	
+
 </project>

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
index 23fc84e..1bbd1dc 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
@@ -29,21 +29,21 @@ import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper;
 import org.apache.flink.util.PropertiesUtil;
 import org.apache.flink.util.SerializedValue;
+
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 
 import java.util.Collections;
-import java.util.Map;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 
-
 /**
  * The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
  * Apache Kafka 0.10.x. The consumer can run in multiple parallel instances, each of which will pull
- * data from one or more Kafka partitions. 
- * 
+ * data from one or more Kafka partitions.
+ *
  * <p>The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
- * during a failure, and that the computation processes elements "exactly once". 
+ * during a failure, and that the computation processes elements "exactly once".
  * (Note: These guarantees naturally assume that Kafka itself does not loose any data.)</p>
  *
  * <p>Please note that Flink snapshots the offsets internally as part of its distributed checkpoints. The offsets
@@ -62,11 +62,10 @@ public class FlinkKafkaConsumer010<T> extends FlinkKafkaConsumer09<T> {
 
 	private static final long serialVersionUID = 2324564345203409112L;
 
-
 	// ------------------------------------------------------------------------
 
 	/**
-	 * Creates a new Kafka streaming source consumer for Kafka 0.10.x
+	 * Creates a new Kafka streaming source consumer for Kafka 0.10.x.
 	 *
 	 * @param topic
 	 *           The name of the topic that should be consumed.
@@ -82,7 +81,7 @@ public class FlinkKafkaConsumer010<T> extends FlinkKafkaConsumer09<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.10.x
 	 *
-	 * This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
+	 * <p>This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
 	 * pairs, offsets, and topic names from Kafka.
 	 *
 	 * @param topic
@@ -99,7 +98,7 @@ public class FlinkKafkaConsumer010<T> extends FlinkKafkaConsumer09<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.10.x
 	 *
-	 * This constructor allows passing multiple topics to the consumer.
+	 * <p>This constructor allows passing multiple topics to the consumer.
 	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
@@ -115,7 +114,7 @@ public class FlinkKafkaConsumer010<T> extends FlinkKafkaConsumer09<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.10.x
 	 *
-	 * This constructor allows passing multiple topics and a key/value deserialization schema.
+	 * <p>This constructor allows passing multiple topics and a key/value deserialization schema.
 	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
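
(Illustrative sketch, not part of the diff above.) The javadoc describes the single-topic constructor and the checkpoint-based offset handling; a minimal job using that constructor could look as follows — SimpleStringSchema and the broker/topic/group values are placeholder assumptions:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

import java.util.Properties;

public class Consumer010Sketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.enableCheckpointing(5000); // offsets are snapshotted as part of Flink's checkpoints

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker address
		props.setProperty("group.id", "demo");                    // placeholder consumer group

		DataStream<String> stream = env.addSource(
				new FlinkKafkaConsumer010<String>("my-topic", new SimpleStringSchema(), props));
		stream.print();

		env.execute("kafka-0.10-consumer-sketch");
	}
}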

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
index 711fe07..805bc4e 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
@@ -17,8 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
-
 import org.apache.flink.api.common.functions.IterationRuntimeContext;
 import org.apache.flink.api.common.functions.RichFunction;
 import org.apache.flink.api.common.functions.RuntimeContext;
@@ -37,34 +35,35 @@ import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+
 import org.apache.kafka.clients.producer.ProducerRecord;
 
+import java.util.Properties;
+
 import static org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducerBase.getPartitionsByTopic;
 import static org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducerBase.getPropertiesFromBrokerList;
 
-
 /**
  * Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.10.x
  *
- * Implementation note: This producer is a hybrid between a regular regular sink function (a)
+ * <p>Implementation note: This producer is a hybrid between a regular regular sink function (a)
  * and a custom operator (b).
  *
- * For (a), the class implements the SinkFunction and RichFunction interfaces.
+ * <p>For (a), the class implements the SinkFunction and RichFunction interfaces.
  * For (b), it extends the StreamTask class.
  *
- * Details about approach (a):
- *
+ * <p>Details about approach (a):
  *  Pre Kafka 0.10 producers only follow approach (a), allowing users to use the producer using the
  *  DataStream.addSink() method.
  *  Since the APIs exposed in that variant do not allow accessing the the timestamp attached to the record
  *  the Kafka 0.10 producer has a second invocation option, approach (b).
  *
- * Details about approach (b):
+ * <p>Details about approach (b):
  *  Kafka 0.10 supports writing the timestamp attached to a record to Kafka. When adding the
  *  FlinkKafkaProducer010 using the FlinkKafkaProducer010.writeToKafkaWithTimestamps() method, the Kafka producer
  *  can access the internal record timestamp of the record and write it to Kafka.
  *
- * All methods and constructors in this class are marked with the approach they are needed for.
+ * <p>All methods and constructors in this class are marked with the approach they are needed for.
  */
 public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunction<T>, RichFunction {
 
@@ -79,7 +78,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	 * Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to
 	 * the topic.
 	 *
-	 * This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
+	 * <p>This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
 	 *
 	 * @param inStream The stream to write to Kafka
 	 * @param topicId ID of the Kafka topic.
@@ -93,12 +92,11 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 		return writeToKafkaWithTimestamps(inStream, topicId, serializationSchema, producerConfig, new FlinkFixedPartitioner<T>());
 	}
 
-
 	/**
 	 * Creates a FlinkKafkaProducer for a given topic. the sink produces a DataStream to
 	 * the topic.
 	 *
-	 * This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
+	 * <p>This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
 	 *
 	 * @param inStream The stream to write to Kafka
 	 * @param topicId ID of the Kafka topic.
@@ -116,7 +114,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	 * Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to
 	 * the topic.
 	 *
-	 * This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
+	 * <p>This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
 	 *
 	 *  @param inStream The stream to write to Kafka
 	 *  @param topicId The name of the target topic
@@ -212,11 +210,11 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	public FlinkKafkaProducer010(String topicId, KeyedSerializationSchema<T> serializationSchema, Properties producerConfig) {
 		this(topicId, serializationSchema, producerConfig, new FlinkFixedPartitioner<T>());
 	}
-	
+
 	/**
-	 * Create Kafka producer
+	 * Create Kafka producer.
 	 *
-	 * This constructor does not allow writing timestamps to Kafka, it follow approach (a) (see above)
+	 * <p>This constructor does not allow writing timestamps to Kafka, it follow approach (a) (see above)
 	 */
 	public FlinkKafkaProducer010(String topicId, KeyedSerializationSchema<T> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<T> customPartitioner) {
 		// We create a Kafka 09 producer instance here and only "override" (by intercepting) the
@@ -230,7 +228,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	 * Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to
 	 * the topic.
 	 *
-	 * This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
+	 * <p>This constructor allows writing timestamps to Kafka, it follow approach (b) (see above)
 	 *
 	 *  @param inStream The stream to write to Kafka
 	 *  @param topicId The name of the target topic
@@ -275,9 +273,9 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	}
 
 	/**
-	 * Create Kafka producer
+	 * Create Kafka producer.
 	 *
-	 * This constructor does not allow writing timestamps to Kafka, it follow approach (a) (see above)
+	 * <p>This constructor does not allow writing timestamps to Kafka, it follow approach (a) (see above)
 	 *
 	 * @deprecated This is a deprecated constructor that does not correctly handle partitioning when
 	 *             producing to multiple topics. Use
@@ -306,13 +304,13 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 		}
 
 		Long timestamp = null;
-		if(this.writeTimestampToKafka) {
+		if (this.writeTimestampToKafka) {
 			timestamp = elementTimestamp;
 		}
 
 		ProducerRecord<byte[], byte[]> record;
 		int[] partitions = internalProducer.topicPartitionsMap.get(targetTopic);
-		if(null == partitions) {
+		if (null == partitions) {
 			partitions = getPartitionsByTopic(targetTopic, internalProducer.producer);
 			internalProducer.topicPartitionsMap.put(targetTopic, partitions);
 		}
@@ -329,10 +327,8 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 		internalProducer.producer.send(record, internalProducer.callback);
 	}
 
-
 	// ----------------- Helper methods implementing methods from SinkFunction and RichFunction (Approach (a)) ----
 
-
 	// ---- Configuration setters
 
 	/**
@@ -341,7 +337,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	 * exceptions will be eventually thrown and cause the streaming program to
 	 * fail (and enter recovery).
 	 *
-	 * Method is only accessible for approach (a) (see above)
+	 * <p>Method is only accessible for approach (a) (see above)
 	 *
 	 * @param logFailuresOnly The flag to indicate logging-only on exceptions.
 	 */
@@ -355,7 +351,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	 * to be acknowledged by the Kafka producer on a checkpoint.
 	 * This way, the producer can guarantee that messages in the Kafka buffers are part of the checkpoint.
 	 *
-	 * Method is only accessible for approach (a) (see above)
+	 * <p>Method is only accessible for approach (a) (see above)
 	 *
 	 * @param flush Flag indicating the flushing mode (true = flush on checkpoint)
 	 */
@@ -365,8 +361,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	}
 
 	/**
-	 * This method is used for approach (a) (see above)
-	 *
+	 * This method is used for approach (a) (see above).
 	 */
 	@Override
 	public void open(Configuration parameters) throws Exception {
@@ -375,7 +370,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	}
 
 	/**
-	 * This method is used for approach (a) (see above)
+	 * This method is used for approach (a) (see above).
 	 */
 	@Override
 	public IterationRuntimeContext getIterationRuntimeContext() {
@@ -384,7 +379,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	}
 
 	/**
-	 * This method is used for approach (a) (see above)
+	 * This method is used for approach (a) (see above).
 	 */
 	@Override
 	public void setRuntimeContext(RuntimeContext t) {
@@ -395,7 +390,7 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 	/**
 	 * Invoke method for using the Sink as DataStream.addSink() sink.
 	 *
-	 * This method is used for approach (a) (see above)
+	 * <p>This method is used for approach (a) (see above)
 	 *
 	 * @param value The input record.
 	 */
@@ -404,14 +399,12 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 		invokeInternal(value, Long.MAX_VALUE);
 	}
 
-
 	// ----------------- Helper methods and classes implementing methods from StreamSink (Approach (b)) ----
 
-
 	/**
 	 * Process method for using the sink with timestamp support.
 	 *
-	 * This method is used for approach (b) (see above)
+	 * <p>This method is used for approach (b) (see above)
 	 */
 	@Override
 	public void processElement(StreamRecord<T> element) throws Exception {
@@ -467,5 +460,4 @@ public class FlinkKafkaProducer010<T> extends StreamSink<T> implements SinkFunct
 		}
 	}
 
-
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
index 1b2abcc..9921428 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
@@ -18,13 +18,15 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
-import org.apache.avro.specific.SpecificRecord;
-import org.apache.avro.specific.SpecificRecordBase;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
 
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.avro.specific.SpecificRecordBase;
+
+import java.util.Properties;
+
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.10.
  */
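
The reordering above is the import grouping that the strict checkstyle rules enforce, and it repeats in the files below: org.apache.flink imports first, then other third-party libraries, then java.* last, each group separated by a blank line. A compilable toy example of the convention follows; the class and field names are illustrative only.

import org.apache.flink.types.Row;                 // group 1: org.apache.flink.*

import org.apache.avro.specific.SpecificRecord;    // group 2: other third-party dependencies

import java.util.Properties;                       // group 3: java.* last

public class ImportOrderSketch {
	private Row row;                               // fields exist only so the imports are used
	private SpecificRecord record;
	private Properties properties;
}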

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
index 78ef28e..f400f6b 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
index 03e9125..a6de13a 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
index 586d841..eb4dfee 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
@@ -36,10 +36,10 @@ import java.util.Properties;
 
 /**
  * A fetcher that fetches data from Kafka brokers via the Kafka 0.10 consumer API.
- * 
+ *
  * <p>This fetcher re-uses basically all functionality of the 0.9 fetcher. It only additionally
  * takes the KafkaRecord-attached timestamp and attaches it to the Flink records.
- * 
+ *
  * @param <T> The type of elements produced by the fetcher.
  */
 public class Kafka010Fetcher<T> extends Kafka09Fetcher<T> {
@@ -57,8 +57,7 @@ public class Kafka010Fetcher<T> extends Kafka09Fetcher<T> {
 			KeyedDeserializationSchema<T> deserializer,
 			Properties kafkaProperties,
 			long pollTimeout,
-			boolean useMetrics) throws Exception
-	{
+			boolean useMetrics) throws Exception {
 		super(
 				sourceContext,
 				assignedPartitionsWithInitialOffsets,
@@ -88,7 +87,7 @@ public class Kafka010Fetcher<T> extends Kafka09Fetcher<T> {
 
 	/**
 	 * This method needs to be overridden because Kafka broke binary compatibility between 0.9 and 0.10,
-	 * changing binary signatures
+	 * changing binary signatures.
 	 */
 	@Override
 	protected KafkaConsumerCallBridge010 createCallBridge() {

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
index 0fda9a6..b621140 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
@@ -26,11 +26,11 @@ import java.util.List;
 
 /**
  * The ConsumerCallBridge simply calls the {@link KafkaConsumer#assign(java.util.Collection)} method.
- * 
- * This indirection is necessary, because Kafka broke binary compatibility between 0.9 and 0.10,
+ *
+ * <p>This indirection is necessary, because Kafka broke binary compatibility between 0.9 and 0.10,
  * changing {@code assign(List)} to {@code assign(Collection)}.
- * 
- * Because of that, we need two versions whose compiled code goes against different method signatures.
+ *
+ * <p>Because of that, we need two versions whose compiled code goes against different method signatures.
  */
 public class KafkaConsumerCallBridge010 extends KafkaConsumerCallBridge {
 

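The javadoc above is the whole story behind this class: the same Java source line, consumer.assign(partitions), binds to assign(List) when compiled against the Kafka 0.9 client and to assign(Collection) when compiled against the 0.10 client, so each connector ships a bridge compiled against its own client version. A hedged sketch of the idea follows; the method name assignPartitions and the standalone class are assumptions for illustration, not the exact contents of the bridge classes.

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.List;

public class CallBridgeSketch {

	// When this file is compiled against the Kafka 0.10 client, the call below is bound to
	// KafkaConsumer#assign(Collection); against the 0.9 client it is bound to assign(List).
	// The resulting bytecode references different method descriptors, which is why the two
	// connectors need two separately compiled bridge classes.
	void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> partitions) {
		consumer.assign(partitions);
	}
}
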
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/resources/log4j.properties b/flink-connectors/flink-connector-kafka-0.10/src/main/resources/log4j.properties
index 6bdfb48..6eef174 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/resources/log4j.properties
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/resources/log4j.properties
@@ -26,4 +26,3 @@ log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
 # suppress the irrelevant (wrong) warnings from the netty channel handler
 log4j.logger.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, testlogger
 
-

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSourceTest.java
index ed93725..025fefc 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.util.serialization.AvroRowDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka010AvroTableSource}.
+ */
 public class Kafka010AvroTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010FetcherTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010FetcherTest.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010FetcherTest.java
index 2d0551d..aedd4ba 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010FetcherTest.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010FetcherTest.java
@@ -39,10 +39,8 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.consumer.OffsetCommitCallback;
 import org.apache.kafka.common.TopicPartition;
-
 import org.junit.Test;
 import org.junit.runner.RunWith;
-
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -63,7 +61,6 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyLong;
 import static org.powermock.api.mockito.PowerMockito.doAnswer;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ITCase.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ITCase.java
index add623e..22193b7 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ITCase.java
@@ -38,13 +38,17 @@ import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema;
+
 import org.junit.Test;
 
 import javax.annotation.Nullable;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 
-
+/**
+ * IT cases for Kafka 0.10 .
+ */
 public class Kafka010ITCase extends KafkaConsumerTestBase {
 
 	// ------------------------------------------------------------------------
@@ -83,7 +87,6 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 		runFailOnDeployTest();
 	}
 
-
 	// --- source to partition mappings and exactly once ---
 
 	@Test(timeout = 60000)
@@ -170,7 +173,7 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 	}
 
 	/**
-	 * Kafka 0.10 specific test, ensuring Timestamps are properly written to and read from Kafka
+	 * Kafka 0.10 specific test, ensuring Timestamps are properly written to and read from Kafka.
 	 */
 	@Test(timeout = 60000)
 	public void testTimestamps() throws Exception {
@@ -193,9 +196,9 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 			@Override
 			public void run(SourceContext<Long> ctx) throws Exception {
 				long i = 0;
-				while(running) {
-					ctx.collectWithTimestamp(i, i*2);
-					if(i++ == 1000L) {
+				while (running) {
+					ctx.collectWithTimestamp(i, i * 2);
+					if (i++ == 1000L) {
 						running = false;
 					}
 				}
@@ -213,7 +216,7 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 
 			@Override
 			public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
-				return (int)(next % 3);
+				return (int) (next % 3);
 			}
 		});
 		prod.setParallelism(3);
@@ -235,7 +238,7 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 			@Nullable
 			@Override
 			public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
-				if(lastElement % 10 == 0) {
+				if (lastElement % 10 == 0) {
 					return new Watermark(lastElement);
 				}
 				return null;
@@ -278,7 +281,7 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 		@Override
 		public void processElement(StreamRecord<Long> element) throws Exception {
 			elCount++;
-			if(element.getValue() * 2 != element.getTimestamp()) {
+			if (element.getValue() * 2 != element.getTimestamp()) {
 				throw new RuntimeException("Invalid timestamp: " + element);
 			}
 		}
@@ -287,13 +290,13 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 		public void processWatermark(Watermark mark) throws Exception {
 			wmCount++;
 
-			if(lastWM <= mark.getTimestamp()) {
+			if (lastWM <= mark.getTimestamp()) {
 				lastWM = mark.getTimestamp();
 			} else {
 				throw new RuntimeException("Received watermark higher than the last one");
 			}
 
-			if( mark.getTimestamp() % 10 != 0 && mark.getTimestamp() != Long.MAX_VALUE ) {
+			if (mark.getTimestamp() % 10 != 0 && mark.getTimestamp() != Long.MAX_VALUE) {
 				throw new RuntimeException("Invalid watermark: " + mark.getTimestamp());
 			}
 		}
@@ -301,11 +304,11 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 		@Override
 		public void close() throws Exception {
 			super.close();
-			if(elCount != 1000L) {
+			if (elCount != 1000L) {
 				throw new RuntimeException("Wrong final element count " + elCount);
 			}
 
-			if(wmCount <= 2) {
+			if (wmCount <= 2) {
 				throw new RuntimeException("Almost no watermarks have been sent " + wmCount);
 			}
 		}
@@ -322,6 +325,7 @@ public class Kafka010ITCase extends KafkaConsumerTestBase {
 			this.ti = TypeInfoParser.parse("Long");
 			this.ser = ti.createSerializer(new ExecutionConfig());
 		}
+
 		@Override
 		public TypeInformation<Long> getProducedType() {
 			return ti;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSourceTest.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSourceTest.java
index 55e8b9c..092f5ea 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSourceTest.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSourceTest.java
@@ -18,12 +18,16 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
 import org.apache.flink.types.Row;
 
+import java.util.Properties;
+
+/**
+ * Tests for the {@link Kafka010JsonTableSource}.
+ */
 public class Kafka010JsonTableSourceTest extends KafkaTableSourceTestBase {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ProducerITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ProducerITCase.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ProducerITCase.java
index 42b9682..64a5a3f 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ProducerITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010ProducerITCase.java
@@ -18,10 +18,11 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-
 import org.junit.Test;
 
-
+/**
+ * IT cases for the {@link FlinkKafkaProducer010}.
+ */
 @SuppressWarnings("serial")
 public class Kafka010ProducerITCase extends KafkaProducerTestBase {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
index c88c858..cb30fbf 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java
@@ -15,9 +15,17 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.operators.StreamSink;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
+import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
+import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
+import org.apache.flink.util.NetUtils;
+
 import kafka.admin.AdminUtils;
 import kafka.common.KafkaException;
 import kafka.server.KafkaConfig;
@@ -27,14 +35,6 @@ import kafka.utils.ZkUtils;
 import org.I0Itec.zkclient.ZkClient;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingServer;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.datastream.DataStreamSink;
-import org.apache.flink.streaming.api.operators.StreamSink;
-import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.streaming.connectors.kafka.testutils.ZooKeeperStringSerializer;
-import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
-import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
-import org.apache.flink.util.NetUtils;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
@@ -57,7 +57,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * An implementation of the KafkaServerProvider for Kafka 0.10
+ * An implementation of the KafkaServerProvider for Kafka 0.10 .
  */
 public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
@@ -87,7 +87,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	@Override
 	public Properties getSecureProperties() {
 		Properties prop = new Properties();
-		if(secureMode) {
+		if (secureMode) {
 			prop.put("security.inter.broker.protocol", "SASL_PLAINTEXT");
 			prop.put("security.protocol", "SASL_PLAINTEXT");
 			prop.put("sasl.kerberos.service.name", "kafka");
@@ -95,7 +95,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			//add special timeout for Travis
 			prop.setProperty("zookeeper.session.timeout.ms", String.valueOf(zkTimeout));
 			prop.setProperty("zookeeper.connection.timeout.ms", String.valueOf(zkTimeout));
-			prop.setProperty("metadata.fetch.timeout.ms","120000");
+			prop.setProperty("metadata.fetch.timeout.ms", "120000");
 		}
 		return prop;
 	}
@@ -122,7 +122,6 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		return new StreamSink<>(prod);
 	}
 
-
 	@Override
 	public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
 		FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
@@ -176,7 +175,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	@Override
 	public void prepare(int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
 		//increase the timeout since in Travis ZK connection takes long time for secure connection.
-		if(secureMode) {
+		if (secureMode) {
 			//run only one kafka server to avoid multiple ZK connections from many instances - Travis timeout
 			numKafkaServers = 1;
 			zkTimeout = zkTimeout * 15;
@@ -203,7 +202,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		brokers = null;
 
 		try {
-			zookeeper = new TestingServer(-	1, tmpZkDir);
+			zookeeper = new TestingServer(-1, tmpZkDir);
 			zookeeperConnectionString = zookeeper.getConnectString();
 			LOG.info("Starting Zookeeper with zookeeperConnectionString: {}", zookeeperConnectionString);
 
@@ -213,7 +212,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			for (int i = 0; i < numKafkaServers; i++) {
 				brokers.add(getKafkaServer(i, tmpKafkaDirs.get(i)));
 
-				if(secureMode) {
+				if (secureMode) {
 					brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.SASL_PLAINTEXT)) + ",";
 				} else {
 					brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.PLAINTEXT)) + ",";
@@ -299,7 +298,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		final long deadline = System.nanoTime() + 30_000_000_000L;
 		do {
 			try {
-				if(secureMode) {
+				if (secureMode) {
 					//increase wait time since in Travis ZK timeout occurs frequently
 					int wait = zkTimeout / 100;
 					LOG.info("waiting for {} msecs before the topic {} can be checked", wait, topic);
@@ -315,7 +314,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 
 			// create a new ZK utils connection
 			ZkUtils checkZKConn = getZkUtils();
-			if(AdminUtils.topicExists(checkZKConn, topic)) {
+			if (AdminUtils.topicExists(checkZKConn, topic)) {
 				checkZKConn.close();
 				return;
 			}
@@ -343,7 +342,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 	}
 
 	/**
-	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed)
+	 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 	 */
 	protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
 		Properties kafkaProperties = new Properties();
@@ -359,7 +358,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 		// for CI stability, increase zookeeper session timeout
 		kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
 		kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
-		if(additionalServerProperties != null) {
+		if (additionalServerProperties != null) {
 			kafkaProperties.putAll(additionalServerProperties);
 		}
 
@@ -370,7 +369,7 @@ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
 			kafkaProperties.put("port", Integer.toString(kafkaPort));
 
 			//to support secure kafka cluster
-			if(secureMode) {
+			if (secureMode) {
 				LOG.info("Adding Kafka secure configurations");
 				kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
 				kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/pom.xml b/flink-connectors/flink-connector-kafka-0.8/pom.xml
index 5e2ed2d..b6b0336 100644
--- a/flink-connectors/flink-connector-kafka-0.8/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.8/pom.xml
@@ -175,7 +175,6 @@ under the License.
 
 	</dependencies>
 
-
 	<build>
 		<plugins>
 			<plugin>
@@ -215,5 +214,5 @@ under the License.
 			</plugin>
 		</plugins>
 	</build>
-	
+
 </project>

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
index 858a790..6c7b94d 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
@@ -17,13 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import kafka.cluster.Broker;
-import kafka.common.ErrorMapping;
-import kafka.javaapi.PartitionMetadata;
-import kafka.javaapi.TopicMetadata;
-import kafka.javaapi.TopicMetadataRequest;
-import kafka.javaapi.consumer.SimpleConsumer;
-
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
@@ -39,6 +32,12 @@ import org.apache.flink.util.NetUtils;
 import org.apache.flink.util.PropertiesUtil;
 import org.apache.flink.util.SerializedValue;
 
+import kafka.cluster.Broker;
+import kafka.common.ErrorMapping;
+import kafka.javaapi.PartitionMetadata;
+import kafka.javaapi.TopicMetadata;
+import kafka.javaapi.TopicMetadataRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.common.Node;
 
@@ -46,25 +45,25 @@ import java.net.InetAddress;
 import java.net.URL;
 import java.net.UnknownHostException;
 import java.nio.channels.ClosedChannelException;
-import java.util.Collections;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Random;
 
-import static org.apache.flink.util.PropertiesUtil.getInt;
 import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.flink.util.PropertiesUtil.getInt;
 
 /**
  * The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
  * Apache Kafka 0.8.x. The consumer can run in multiple parallel instances, each of which will pull
- * data from one or more Kafka partitions. 
- * 
+ * data from one or more Kafka partitions.
+ *
  * <p>The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
- * during a failure, and that the computation processes elements "exactly once". 
+ * during a failure, and that the computation processes elements "exactly once".
  * (Note: These guarantees naturally assume that Kafka itself does not loose any data.)</p>
- * 
+ *
  * <p>Flink's Kafka Consumer is designed to be compatible with Kafka's High-Level Consumer API (0.8.x).
  * Most of Kafka's configuration variables can be used with this consumer as well:
  *         <ul>
@@ -74,11 +73,9 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *             <li>auto.offset.reset with the values "largest", "smallest"</li>
  *             <li>fetch.wait.max.ms</li>
  *         </ul>
- *     </li>
- * </ul>
- * 
+ *
  * <h1>Offset handling</h1>
- * 
+ *
  * <p>Offsets whose records have been read and are checkpointed will be committed back to ZooKeeper
  * by the offset handler. In addition, the offset handler finds the point where the source initially
  * starts reading from the stream, when the streaming job is started.</p>
@@ -93,7 +90,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * <p>When using a Kafka topic to send data between Flink jobs, we recommend using the
  * {@see TypeInformationSerializationSchema} and {@see TypeInformationKeyValueSerializationSchema}.</p>
- * 
+ *
  * <p><b>NOTE:</b> The implementation currently accesses partition metadata when the consumer
  * is constructed. That means that the client that submits the program needs to be able to
  * reach the Kafka brokers or ZooKeeper.</p>
@@ -102,7 +99,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 
 	private static final long serialVersionUID = -6272159445203409112L;
 
-	/** Configuration key for the number of retries for getting the partition info */
+	/** Configuration key for the number of retries for getting the partition info. */
 	public static final String GET_PARTITIONS_RETRIES_KEY = "flink.get-partitions.retry";
 
 	/** Default number of retries for getting the partition info. One retry means going through the full list of brokers */
@@ -110,13 +107,13 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 
 	// ------------------------------------------------------------------------
 
-	/** The properties to parametrize the Kafka consumer and ZooKeeper client */ 
+	/** The properties to parametrize the Kafka consumer and ZooKeeper client. */
 	private final Properties kafkaProperties;
 
 	// ------------------------------------------------------------------------
 
 	/**
-	 * Creates a new Kafka streaming source consumer for Kafka 0.8.x
+	 * Creates a new Kafka streaming source consumer for Kafka 0.8.x.
 	 *
 	 * @param topic
 	 *           The name of the topic that should be consumed.
@@ -132,7 +129,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.8.x
 	 *
-	 * This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
+	 * <p>This constructor allows passing a {@see KeyedDeserializationSchema} for reading key/value
 	 * pairs, offsets, and topic names from Kafka.
 	 *
 	 * @param topic
@@ -149,7 +146,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.8.x
 	 *
-	 * This constructor allows passing multiple topics to the consumer.
+	 * <p>This constructor allows passing multiple topics to the consumer.
 	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
@@ -165,8 +162,8 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Creates a new Kafka streaming source consumer for Kafka 0.8.x
 	 *
-	 * This constructor allows passing multiple topics and a key/value deserialization schema.
-	 * 
+	 * <p>This constructor allows passing multiple topics and a key/value deserialization schema.
+	 *
 	 * @param topics
 	 *           The Kafka topics to read from.
 	 * @param deserializer
@@ -245,14 +242,14 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 
 	/**
 	 * Send request to Kafka to get partitions for topic.
-	 * 
+	 *
 	 * @param topics The name of the topics.
-	 * @param properties The properties for the Kafka Consumer that is used to query the partitions for the topic. 
+	 * @param properties The properties for the Kafka Consumer that is used to query the partitions for the topic.
 	 */
 	public static List<KafkaTopicPartitionLeader> getPartitionsForTopic(List<String> topics, Properties properties) {
 		String seedBrokersConfString = properties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
 		final int numRetries = getInt(properties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
-		
+
 		checkNotNull(seedBrokersConfString, "Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
 		String[] seedBrokers = seedBrokersConfString.split(",");
 		List<KafkaTopicPartitionLeader> partitions = new ArrayList<>();
@@ -328,7 +325,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	}
 
 	/**
-	 * Turn a broker instance into a node instance
+	 * Turn a broker instance into a node instance.
 	 * @param broker broker instance
 	 * @return Node representing the given broker
 	 */
@@ -337,7 +334,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	}
 
 	/**
-	 * Validate the ZK configuration, checking for required parameters
+	 * Validate the ZK configuration, checking for required parameters.
 	 * @param props Properties to check
 	 */
 	protected static void validateZooKeeperConfig(Properties props) {
@@ -348,7 +345,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 			throw new IllegalArgumentException("Required property '" + ConsumerConfig.GROUP_ID_CONFIG
 					+ "' has not been set in the properties");
 		}
-		
+
 		try {
 			//noinspection ResultOfMethodCallIgnored
 			Integer.parseInt(props.getProperty("zookeeper.session.timeout.ms", "0"));
@@ -356,7 +353,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 		catch (NumberFormatException e) {
 			throw new IllegalArgumentException("Property 'zookeeper.session.timeout.ms' is not a valid integer");
 		}
-		
+
 		try {
 			//noinspection ResultOfMethodCallIgnored
 			Integer.parseInt(props.getProperty("zookeeper.connection.timeout.ms", "0"));
@@ -369,7 +366,7 @@ public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 	/**
 	 * Validate that at least one seed broker is valid in case of a
 	 * ClosedChannelException.
-	 * 
+	 *
 	 * @param seedBrokers
 	 *            array containing the seed brokers e.g. ["host1:port1",
 	 *            "host2:port2"]

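Putting the constructor javadoc and the configuration keys above together, a minimal usage sketch looks like the following. The broker and ZooKeeper addresses, topic, group id and SimpleStringSchema are placeholders; only the property keys themselves come from the class above ("flink.get-partitions.retry" is GET_PARTITIONS_RETRIES_KEY, and offsets are committed to ZooKeeper as described in the class javadoc).

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

import java.util.Properties;

public class Consumer08Sketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092"); // brokers queried for partition metadata
		props.setProperty("zookeeper.connect", "localhost:2181"); // the 0.8 consumer commits offsets to ZooKeeper
		props.setProperty("group.id", "my-group");                // required, see validateZooKeeperConfig above
		props.setProperty("flink.get-partitions.retry", "5");     // optional: retries for the partition query

		DataStream<String> stream = env.addSource(
			new FlinkKafkaConsumer08<>("my-topic", new SimpleStringSchema(), props));

		stream.print();
		env.execute("FlinkKafkaConsumer08 sketch");
	}
}
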
http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer081.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer081.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer081.java
index 4e4050f..4102bf8 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer081.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer081.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer082.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer082.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer082.java
index aeefcc8..7ba5103 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer082.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer082.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java
index 98dac3e..434286e 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java
@@ -22,8 +22,8 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
 import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
-import java.util.Properties;
 
+import java.util.Properties;
 
 /**
  * THIS CLASS IS DEPRECATED. Use FlinkKafkaProducer08 instead.
@@ -38,7 +38,7 @@ public class FlinkKafkaProducer<IN> extends FlinkKafkaProducer08<IN>  {
 	 */
 	@Deprecated
 	public FlinkKafkaProducer(String brokerList, String topicId, SerializationSchema<IN> serializationSchema) {
-		super(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), getPropertiesFromBrokerList(brokerList), (FlinkKafkaPartitioner<IN>)null);
+		super(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), getPropertiesFromBrokerList(brokerList), (FlinkKafkaPartitioner<IN>) null);
 	}
 
 	/**
@@ -46,7 +46,7 @@ public class FlinkKafkaProducer<IN> extends FlinkKafkaProducer08<IN>  {
 	 */
 	@Deprecated
 	public FlinkKafkaProducer(String topicId, SerializationSchema<IN> serializationSchema, Properties producerConfig) {
-		super(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), producerConfig, (FlinkKafkaPartitioner<IN>)null);
+		super(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), producerConfig, (FlinkKafkaPartitioner<IN>) null);
 	}
 
 	/**
@@ -63,7 +63,7 @@ public class FlinkKafkaProducer<IN> extends FlinkKafkaProducer08<IN>  {
 	 */
 	@Deprecated
 	public FlinkKafkaProducer(String brokerList, String topicId, KeyedSerializationSchema<IN> serializationSchema) {
-		super(topicId, serializationSchema, getPropertiesFromBrokerList(brokerList), (FlinkKafkaPartitioner<IN>)null);
+		super(topicId, serializationSchema, getPropertiesFromBrokerList(brokerList), (FlinkKafkaPartitioner<IN>) null);
 	}
 
 	/**
@@ -71,7 +71,7 @@ public class FlinkKafkaProducer<IN> extends FlinkKafkaProducer08<IN>  {
 	 */
 	@Deprecated
 	public FlinkKafkaProducer(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig) {
-		super(topicId, serializationSchema, producerConfig, (FlinkKafkaPartitioner<IN>)null);
+		super(topicId, serializationSchema, producerConfig, (FlinkKafkaPartitioner<IN>) null);
 	}
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
index 08dcb2f..a14768b 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
@@ -27,11 +27,10 @@ import org.apache.flink.streaming.util.serialization.SerializationSchema;
 
 import java.util.Properties;
 
-
 /**
  * Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.8.
  *
- * Please note that this producer does not have any reliability guarantees.
+ * <p>Please note that this producer does not have any reliability guarantees.
  *
  * @param <IN> Type of the messages to write into Kafka.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
index 1a68c05..a1bea78 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
@@ -18,13 +18,15 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import java.util.Properties;
-import org.apache.avro.specific.SpecificRecord;
-import org.apache.avro.specific.SpecificRecordBase;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
 import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
 
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.avro.specific.SpecificRecordBase;
+
+import java.util.Properties;
+
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.8.
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
index 80bd180..79406d8 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
-import org.apache.flink.types.Row;
 import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
 import org.apache.flink.streaming.util.serialization.SerializationSchema;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 
@@ -29,9 +30,9 @@ import java.util.Properties;
  * Kafka 0.8 {@link KafkaTableSink} that serializes data in JSON format.
  */
 public class Kafka08JsonTableSink extends KafkaJsonTableSink {
-	
+
 	/**
-	 * Creates {@link KafkaTableSink} for Kafka 0.8
+	 * Creates {@link KafkaTableSink} for Kafka 0.8.
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
@@ -42,7 +43,7 @@ public class Kafka08JsonTableSink extends KafkaJsonTableSink {
 	}
 
 	/**
-	 * Creates {@link KafkaTableSink} for Kafka 0.8
+	 * Creates {@link KafkaTableSink} for Kafka 0.8.
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
index 1555a3b..05a2c71 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
index e1e481c..9536306 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
@@ -19,9 +19,9 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.types.Row;
-import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.streaming.util.serialization.DeserializationSchema;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
 
 import java.util.Properties;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/28e8043b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
index e31dcac..da61dd0 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
@@ -38,27 +38,27 @@ import static java.util.Objects.requireNonNull;
  *         are available and mark themselves as shut down.</li>
  *     <li>The queue allows to poll batches of elements in one polling call.</li>
  * </ol>
- * 
- * The queue has no capacity restriction and is safe for multiple producers and consumers.
- * 
+ *
+ * <p>The queue has no capacity restriction and is safe for multiple producers and consumers.
+ *
  * <p>Note: Null elements are prohibited.
- * 
+ *
  * @param <E> The type of elements in the queue.
  */
 public class ClosableBlockingQueue<E> {
 
-	/** The lock used to make queue accesses and open checks atomic */
+	/** The lock used to make queue accesses and open checks atomic. */
 	private final ReentrantLock lock;
-	
-	/** The condition on which blocking get-calls wait if the queue is empty */
+
+	/** The condition on which blocking get-calls wait if the queue is empty. */
 	private final Condition nonEmpty;
-	
-	/** The deque of elements */
+
+	/** The deque of elements. */
 	private final ArrayDeque<E> elements;
-	
-	/** Flag marking the status of the queue */
+
+	/** Flag marking the status of the queue. */
 	private volatile boolean open;
-	
+
 	// ------------------------------------------------------------------------
 
 	/**
@@ -72,22 +72,21 @@ public class ClosableBlockingQueue<E> {
 	 * Creates a new empty queue, reserving space for at least the specified number
 	 * of elements. The queu can still grow, of more elements are added than the
 	 * reserved space.
-	 * 
+	 *
 	 * @param initialSize The number of elements to reserve space for.
 	 */
 	public ClosableBlockingQueue(int initialSize) {
 		this.lock = new ReentrantLock(true);
 		this.nonEmpty = this.lock.newCondition();
-		
+
 		this.elements = new ArrayDeque<>(initialSize);
 		this.open = true;
-		
-		
+
 	}
 
 	/**
 	 * Creates a new queue that contains the given elements.
-	 * 
+	 *
 	 * @param initialElements The elements to initially add to the queue.
 	 */
 	public ClosableBlockingQueue(Collection<? extends E> initialElements) {
@@ -127,7 +126,7 @@ public class ClosableBlockingQueue<E> {
 	public boolean isOpen() {
 		return open;
 	}
-	
+
 	/**
 	 * Tries to close the queue. Closing the queue only succeeds when no elements are
 	 * in the queue when this method is called. Checking whether the queue is empty, and
@@ -155,25 +154,25 @@ public class ClosableBlockingQueue<E> {
 			lock.unlock();
 		}
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Adding / Removing elements
 	// ------------------------------------------------------------------------
-	
+
 	/**
 	 * Tries to add an element to the queue, if the queue is still open. Checking whether the queue
 	 * is open and adding the element is one atomic operation.
-	 * 
+	 *
 	 * <p>Unlike the {@link #add(Object)} method, this method never throws an exception,
 	 * but only indicates via the return code if the element was added or the
 	 * queue was closed.
-	 * 
+	 *
 	 * @param element The element to add.
 	 * @return True, if the element was added, false if the queue was closes.
 	 */
 	public boolean addIfOpen(E element) {
 		requireNonNull(element);
-		
+
 		lock.lock();
 		try {
 			if (open) {
@@ -191,7 +190,7 @@ public class ClosableBlockingQueue<E> {
 	/**
 	 * Adds the element to the queue, or fails with an exception, if the queue is closed.
 	 * Checking whether the queue is open and adding the element is one atomic operation.
-	 * 
+	 *
 	 * @param element The element to add.
 	 * @throws IllegalStateException Thrown, if the queue is closed.
 	 */
@@ -215,13 +214,13 @@ public class ClosableBlockingQueue<E> {
 
 	/**
 	 * Returns the queue's next element without removing it, if the queue is non-empty.
-	 * Otherwise, returns null. 
+	 * Otherwise, returns null.
 	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and getting the next element is one atomic operation.
-	 * 
+	 *
 	 * <p>This method never blocks.
-	 * 
+	 *
 	 * @return The queue's next element, or null, if the queue is empty.
 	 * @throws IllegalStateException Thrown, if the queue is closed.
 	 */
@@ -244,7 +243,7 @@ public class ClosableBlockingQueue<E> {
 
 	/**
 	 * Returns the queue's next element and removes it, the queue is non-empty.
-	 * Otherwise, this method returns null. 
+	 * Otherwise, this method returns null.
 	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the next element is one atomic operation.
@@ -273,7 +272,7 @@ public class ClosableBlockingQueue<E> {
 
 	/**
 	 * Returns all of the queue's current elements in a list, if the queue is non-empty.
-	 * Otherwise, this method returns null. 
+	 * Otherwise, this method returns null.
 	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the elements is one atomic operation.
@@ -305,12 +304,12 @@ public class ClosableBlockingQueue<E> {
 	/**
 	 * Returns the next element in the queue. If the queue is empty, this method
 	 * waits until at least one element is added.
-	 * 
+	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the next element is one atomic operation.
-	 * 
+	 *
 	 * @return The next element in the queue, never null.
-	 * 
+	 *
 	 * @throws IllegalStateException Thrown, if the queue is closed.
 	 * @throws InterruptedException Throw, if the thread is interrupted while waiting for an
 	 *                              element to be added.
@@ -321,7 +320,7 @@ public class ClosableBlockingQueue<E> {
 			while (open && elements.isEmpty()) {
 				nonEmpty.await();
 			}
-			
+
 			if (open) {
 				return elements.removeFirst();
 			} else {
@@ -336,13 +335,13 @@ public class ClosableBlockingQueue<E> {
 	 * Returns the next element in the queue. If the queue is empty, this method
 	 * waits at most a certain time until an element becomes available. If no element
 	 * is available after that time, the method returns null.
-	 * 
+	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the next element is one atomic operation.
-	 * 
+	 *
 	 * @param timeoutMillis The number of milliseconds to block, at most.
 	 * @return The next element in the queue, or null, if the timeout expires  before an element is available.
-	 * 
+	 *
 	 * @throws IllegalStateException Thrown, if the queue is closed.
 	 * @throws InterruptedException Throw, if the thread is interrupted while waiting for an
 	 *                              element to be added.
@@ -354,16 +353,16 @@ public class ClosableBlockingQueue<E> {
 		} else if (timeoutMillis < 0L) {
 			throw new IllegalArgumentException("invalid timeout");
 		}
-		
+
 		final long deadline = System.nanoTime() + timeoutMillis * 1_000_000L;
-		
+
 		lock.lock();
 		try {
-			while (open && elements.isEmpty() && timeoutMillis > 0) { 
+			while (open && elements.isEmpty() && timeoutMillis > 0) {
 				nonEmpty.await(timeoutMillis, TimeUnit.MILLISECONDS);
 				timeoutMillis = (deadline - System.nanoTime()) / 1_000_000L;
 			}
-			
+
 			if (!open) {
 				throw new IllegalStateException("queue is closed");
 			}
@@ -383,12 +382,12 @@ public class ClosableBlockingQueue<E> {
 	 * at least one element is added.
 	 *
 	 * <p>This method always returns a list with at least one element.
-	 * 
+	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the next element is one atomic operation.
-	 * 
+	 *
 	 * @return A list with all elements in the queue, always at least one element.
-	 * 
+	 *
 	 * @throws IllegalStateException Thrown, if the queue is closed.
 	 * @throws InterruptedException Thrown, if the thread is interrupted while waiting for an
 	 *                              element to be added.
@@ -415,13 +414,13 @@ public class ClosableBlockingQueue<E> {
 	 * Gets all the elements found in the list, or blocks until at least one element
 	 * was added. This method is similar to {@link #getBatchBlocking()}, but takes
 	 * a number of milliseconds that the method will maximally wait before returning.
-	 * 
+	 *
 	 * <p>This method never returns null, but an empty list, if the queue is empty when
 	 * the method is called and the request times out before an element was added.
-	 * 
+	 *
 	 * <p>The method throws an {@code IllegalStateException} if the queue is closed.
 	 * Checking whether the queue is open and removing the next element is one atomic operation.
-	 * 
+	 *
 	 * @param timeoutMillis The number of milliseconds to wait, at most.
 	 * @return A list with all elements in the queue, possibly an empty list.
 	 *
@@ -461,11 +460,11 @@ public class ClosableBlockingQueue<E> {
 			lock.unlock();
 		}
 	}
-	
+
 	// ------------------------------------------------------------------------
 	//  Standard Utilities
 	// ------------------------------------------------------------------------
-	
+
 	@Override
 	public int hashCode() {
 		int hashCode = 17;
@@ -482,7 +481,7 @@ public class ClosableBlockingQueue<E> {
 		} else if (obj != null && obj.getClass() == ClosableBlockingQueue.class) {
 			@SuppressWarnings("unchecked")
 			ClosableBlockingQueue<E> that = (ClosableBlockingQueue<E>) obj;
-			
+
 			if (this.elements.size() == that.elements.size()) {
 				Iterator<E> thisElements = this.elements.iterator();
 				for (E thatNext : that.elements) {

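For readers skimming the hunk above: a minimal usage sketch of the queue's blocking accessors. This is an assumption-based sketch, not part of the commit; it assumes the ClosableBlockingQueue edited here is the one in the Kafka connector's internals package and that the no-arg constructor, add(), getElementBlocking(long), getBatchBlocking() and close() signatures match the javadoc shown in the diff.

	import org.apache.flink.streaming.connectors.kafka.internals.ClosableBlockingQueue;

	import java.util.List;

	public class ClosableBlockingQueueSketch {

		public static void main(String[] args) throws InterruptedException {
			ClosableBlockingQueue<String> queue = new ClosableBlockingQueue<>();
			queue.add("first");
			queue.add("second");

			// Timed poll: waits at most 100 ms and returns null if no element arrives in time.
			String next = queue.getElementBlocking(100L);
			System.out.println("polled: " + next);

			// Batch drain: blocks until at least one element is present, then returns all of them.
			List<String> remaining = queue.getBatchBlocking();
			System.out.println("drained: " + remaining);

			// Closing succeeds only while the queue is empty; afterwards the accessors above
			// throw IllegalStateException, matching the javadoc in the hunk above.
			queue.close();
		}
	}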

[02/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-nifi

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-nifi


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7ac4a244
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7ac4a244
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7ac4a244

Branch: refs/heads/master
Commit: 7ac4a24431d65c6b8de889a03bbcee11afcec9b0
Parents: 8591035
Author: zentol <ch...@apache.org>
Authored: Wed May 24 22:31:36 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:05 2017 +0200

----------------------------------------------------------------------
 .../apache/flink/streaming/connectors/nifi/NiFiDataPacket.java  | 5 ++---
 .../flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java  | 1 +
 .../org/apache/flink/streaming/connectors/nifi/NiFiSink.java    | 2 ++
 .../org/apache/flink/streaming/connectors/nifi/NiFiSource.java  | 2 +-
 .../flink/streaming/connectors/nifi/StandardNiFiDataPacket.java | 3 ++-
 .../connectors/nifi/examples/NiFiSinkTopologyExample.java       | 4 +++-
 .../connectors/nifi/examples/NiFiSourceTopologyExample.java     | 2 ++
 7 files changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacket.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacket.java b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacket.java
index c8ceb57..93fef2c 100644
--- a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacket.java
+++ b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacket.java
@@ -14,15 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.nifi;
 
 import java.util.Map;
 
 /**
- * <p>
  * The NiFiDataPacket provides a packaging around a NiFi FlowFile. It wraps both
  * a FlowFile's content and its attributes so that they can be processed by Flink.
- * </p>
  */
 public interface NiFiDataPacket {
 
@@ -36,4 +35,4 @@ public interface NiFiDataPacket {
 	 */
 	Map<String, String> getAttributes();
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java
index 9bb521b..fd311bd 100644
--- a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java
+++ b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiDataPacketBuilder.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.nifi;
 
 import org.apache.flink.api.common.functions.Function;

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSink.java b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSink.java
index abc6b35..e46c9ee 100644
--- a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSink.java
+++ b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSink.java
@@ -15,10 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.nifi;
 
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
+
 import org.apache.nifi.remote.Transaction;
 import org.apache.nifi.remote.TransferDirection;
 import org.apache.nifi.remote.client.SiteToSiteClient;

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSource.java b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSource.java
index 57c59ec..ba41169 100644
--- a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSource.java
+++ b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/NiFiSource.java
@@ -21,13 +21,13 @@ package org.apache.flink.streaming.connectors.nifi;
 import org.apache.flink.api.common.functions.StoppableFunction;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
+
 import org.apache.nifi.remote.Transaction;
 import org.apache.nifi.remote.TransferDirection;
 import org.apache.nifi.remote.client.SiteToSiteClient;
 import org.apache.nifi.remote.client.SiteToSiteClientConfig;
 import org.apache.nifi.remote.protocol.DataPacket;
 import org.apache.nifi.stream.io.StreamUtils;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/StandardNiFiDataPacket.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/StandardNiFiDataPacket.java b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/StandardNiFiDataPacket.java
index 5ad4bae..3a440d8 100644
--- a/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/StandardNiFiDataPacket.java
+++ b/flink-connectors/flink-connector-nifi/src/main/java/org/apache/flink/streaming/connectors/nifi/StandardNiFiDataPacket.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.nifi;
 
 import java.io.Serializable;
@@ -43,4 +44,4 @@ public class StandardNiFiDataPacket implements NiFiDataPacket, Serializable {
 		return attributes;
 	}
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSinkTopologyExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSinkTopologyExample.java b/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSinkTopologyExample.java
index 202e80a..d4208a0 100644
--- a/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSinkTopologyExample.java
+++ b/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSinkTopologyExample.java
@@ -14,6 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package org.apache.flink.streaming.connectors.nifi.examples;
 
 import org.apache.flink.api.common.functions.RuntimeContext;
@@ -24,6 +25,7 @@ import org.apache.flink.streaming.connectors.nifi.NiFiDataPacket;
 import org.apache.flink.streaming.connectors.nifi.NiFiDataPacketBuilder;
 import org.apache.flink.streaming.connectors.nifi.NiFiSink;
 import org.apache.flink.streaming.connectors.nifi.StandardNiFiDataPacket;
+
 import org.apache.nifi.remote.client.SiteToSiteClient;
 import org.apache.nifi.remote.client.SiteToSiteClientConfig;
 
@@ -47,7 +49,7 @@ public class NiFiSinkTopologyExample {
 					@Override
 					public NiFiDataPacket createNiFiDataPacket(String s, RuntimeContext ctx) {
 						return new StandardNiFiDataPacket(s.getBytes(ConfigConstants.DEFAULT_CHARSET),
-							new HashMap<String,String>());
+							new HashMap<String, String>());
 					}
 				}));
 

http://git-wip-us.apache.org/repos/asf/flink/blob/7ac4a244/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSourceTopologyExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSourceTopologyExample.java b/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSourceTopologyExample.java
index 79c9a1c..53b0c22 100644
--- a/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSourceTopologyExample.java
+++ b/flink-connectors/flink-connector-nifi/src/test/java/org/apache/flink/streaming/connectors/nifi/examples/NiFiSourceTopologyExample.java
@@ -14,6 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package org.apache.flink.streaming.connectors.nifi.examples;
 
 import org.apache.flink.api.common.functions.MapFunction;
@@ -22,6 +23,7 @@ import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.nifi.NiFiDataPacket;
 import org.apache.flink.streaming.connectors.nifi.NiFiSource;
+
 import org.apache.nifi.remote.client.SiteToSiteClient;
 import org.apache.nifi.remote.client.SiteToSiteClientConfig;
 

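Since the NiFi hunks above only reorder imports and touch javadoc, here is a hedged sketch of how the affected classes fit together. It assumes NiFiSource exposes a constructor taking a SiteToSiteClientConfig, as the existing NiFiSourceTopologyExample suggests; the URL and port name below are placeholders, not values from this commit.

	import org.apache.flink.streaming.api.datastream.DataStream;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.streaming.connectors.nifi.NiFiDataPacket;
	import org.apache.flink.streaming.connectors.nifi.NiFiSource;

	import org.apache.nifi.remote.client.SiteToSiteClient;
	import org.apache.nifi.remote.client.SiteToSiteClientConfig;

	public class NiFiSourceSketch {

		public static void main(String[] args) throws Exception {
			StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

			// Site-to-site configuration; URL and output port name are placeholders.
			SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
					.url("http://localhost:8080/nifi")
					.portName("Data for Flink")
					.buildConfig();

			// NiFiSource emits NiFiDataPacket elements wrapping FlowFile content and attributes.
			DataStream<NiFiDataPacket> packets = env.addSource(new NiFiSource(clientConfig));
			packets.print();

			env.execute("NiFi source sketch");
		}
	}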

[19/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connector-cassandra

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connector-cassandra


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1a3a5b6e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1a3a5b6e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1a3a5b6e

Branch: refs/heads/master
Commit: 1a3a5b6e976d18c49a99870c0f71ebf615a862d3
Parents: 7292c87
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:57:46 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:32 2017 +0200

----------------------------------------------------------------------
 .../cassandra/CassandraInputFormat.java         |  14 +--
 .../cassandra/CassandraOutputFormat.java        |  14 +--
 .../cassandra/AbstractCassandraTupleSink.java   | 105 ++++++++++---------
 .../cassandra/CassandraCommitter.java           |  10 +-
 .../connectors/cassandra/CassandraPojoSink.java |   9 +-
 .../cassandra/CassandraScalaProductSink.java    |  82 +++++++--------
 .../connectors/cassandra/CassandraSink.java     |  39 +++++--
 .../connectors/cassandra/CassandraSinkBase.java |   9 +-
 .../cassandra/CassandraTupleWriteAheadSink.java |  14 +--
 .../connectors/cassandra/ClusterBuilder.java    |   1 +
 .../cassandra/example/BatchExample.java         |  10 +-
 .../cassandra/CassandraConnectorITCase.java     |  34 +++---
 .../CassandraTupleWriteAheadSinkTest.java       |  17 +--
 .../streaming/connectors/cassandra/Pojo.java    |  18 ++--
 .../example/CassandraPojoSinkExample.java       |  11 +-
 .../example/CassandraTupleSinkExample.java      |   8 +-
 .../CassandraTupleWriteAheadSinkExample.java    |  12 ++-
 .../connectors/cassandra/example/Message.java   |   4 +
 .../src/test/resources/log4j-test.properties    |   1 -
 19 files changed, 230 insertions(+), 182 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraInputFormat.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraInputFormat.java
index 849e023..e0806fe 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraInputFormat.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraInputFormat.java
@@ -14,13 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.batch.connectors.cassandra;
 
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-import com.google.common.base.Strings;
 import org.apache.flink.api.common.io.DefaultInputSplitAssigner;
 import org.apache.flink.api.common.io.NonParallelInput;
 import org.apache.flink.api.common.io.RichInputFormat;
@@ -32,6 +28,12 @@ import org.apache.flink.core.io.InputSplit;
 import org.apache.flink.core.io.InputSplitAssigner;
 import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
 import org.apache.flink.util.Preconditions;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+import com.datastax.driver.core.Session;
+import com.google.common.base.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -121,7 +123,7 @@ public class CassandraInputFormat<OUT extends Tuple> extends RichInputFormat<OUT
 		}
 
 		try {
-			if (cluster != null ) {
+			if (cluster != null) {
 				cluster.close();
 			}
 		} catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraOutputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraOutputFormat.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraOutputFormat.java
index 15d8fb3..c81391d 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraOutputFormat.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/batch/connectors/cassandra/CassandraOutputFormat.java
@@ -14,8 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.batch.connectors.cassandra;
 
+import org.apache.flink.api.common.io.RichOutputFormat;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
+import org.apache.flink.util.Preconditions;
+
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.ResultSet;
@@ -24,11 +31,6 @@ import com.datastax.driver.core.Session;
 import com.google.common.base.Strings;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import org.apache.flink.api.common.io.RichOutputFormat;
-import org.apache.flink.api.java.tuple.Tuple;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
-import org.apache.flink.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -115,7 +117,7 @@ public class CassandraOutputFormat<OUT extends Tuple> extends RichOutputFormat<O
 		}
 
 		try {
-			if (cluster != null ) {
+			if (cluster != null) {
 				cluster.close();
 			}
 		} catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/AbstractCassandraTupleSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/AbstractCassandraTupleSink.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/AbstractCassandraTupleSink.java
index 7a8d097..fda739e 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/AbstractCassandraTupleSink.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/AbstractCassandraTupleSink.java
@@ -1,52 +1,53 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.cassandra;
-
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSet;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.flink.configuration.Configuration;
-
-/**
- * Abstract sink to write tuple-like values into a Cassandra cluster.
- *
- * @param <IN> Type of the elements emitted by this sink
- */
-public abstract class AbstractCassandraTupleSink<IN> extends CassandraSinkBase<IN, ResultSet> {
-	private final String insertQuery;
-	private transient PreparedStatement ps;
-
-	public AbstractCassandraTupleSink(String insertQuery, ClusterBuilder builder) {
-		super(builder);
-		this.insertQuery = insertQuery;
-	}
-
-	@Override
-	public void open(Configuration configuration) {
-		super.open(configuration);
-		this.ps = session.prepare(insertQuery);
-	}
-
-	@Override
-	public ListenableFuture<ResultSet> send(IN value) {
-		Object[] fields = extract(value);
-		return session.executeAsync(ps.bind(fields));
-	}
-
-	protected abstract Object[] extract(IN record);
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.cassandra;
+
+import org.apache.flink.configuration.Configuration;
+
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSet;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Abstract sink to write tuple-like values into a Cassandra cluster.
+ *
+ * @param <IN> Type of the elements emitted by this sink
+ */
+public abstract class AbstractCassandraTupleSink<IN> extends CassandraSinkBase<IN, ResultSet> {
+	private final String insertQuery;
+	private transient PreparedStatement ps;
+
+	public AbstractCassandraTupleSink(String insertQuery, ClusterBuilder builder) {
+		super(builder);
+		this.insertQuery = insertQuery;
+	}
+
+	@Override
+	public void open(Configuration configuration) {
+		super.open(configuration);
+		this.ps = session.prepare(insertQuery);
+	}
+
+	@Override
+	public ListenableFuture<ResultSet> send(IN value) {
+		Object[] fields = extract(value);
+		return session.executeAsync(ps.bind(fields));
+	}
+
+	protected abstract Object[] extract(IN record);
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraCommitter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraCommitter.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraCommitter.java
index 63b76da..b3948b2 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraCommitter.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraCommitter.java
@@ -15,13 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
+import org.apache.flink.api.java.ClosureCleaner;
+import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
+
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.Row;
 import com.datastax.driver.core.Session;
-import org.apache.flink.api.java.ClosureCleaner;
-import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
 
 import java.util.HashMap;
 import java.util.Iterator;
@@ -30,13 +32,13 @@ import java.util.Map;
 /**
  * CheckpointCommitter that saves information about completed checkpoints within a separate table in a cassandra
  * database.
- * 
+ *
  * <p>Entries are in the form |operator_id | subtask_id | last_completed_checkpoint|
  */
 public class CassandraCommitter extends CheckpointCommitter {
 
 	private static final long serialVersionUID = 1L;
-	
+
 	private final ClusterBuilder builder;
 	private transient Cluster cluster;
 	private transient Session session;

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraPojoSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraPojoSink.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraPojoSink.java
index 9cfb2f8..c9b29b8 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraPojoSink.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraPojoSink.java
@@ -17,14 +17,15 @@
 
 package org.apache.flink.streaming.connectors.cassandra;
 
+import org.apache.flink.configuration.Configuration;
+
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.mapping.Mapper;
 import com.datastax.driver.mapping.MappingManager;
 import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.flink.configuration.Configuration;
 
 /**
- * Flink Sink to save data into a Cassandra cluster using 
+ * Flink Sink to save data into a Cassandra cluster using
  * <a href="http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/mapping/Mapper.html">Mapper</a>,
  * which uses annotations from
  * <a href="http://docs.datastax.com/en/drivers/java/2.1/com/datastax/driver/mapping/annotations/package-summary.html">
@@ -41,9 +42,9 @@ public class CassandraPojoSink<IN> extends CassandraSinkBase<IN, ResultSet> {
 	protected transient MappingManager mappingManager;
 
 	/**
-	 * The main constructor for creating CassandraPojoSink
+	 * The main constructor for creating CassandraPojoSink.
 	 *
-	 * @param clazz Class<IN> instance
+	 * @param clazz Class instance
 	 */
 	public CassandraPojoSink(Class<IN> clazz, ClusterBuilder builder) {
 		super(builder);

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraScalaProductSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraScalaProductSink.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraScalaProductSink.java
index a975985..1d1b634 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraScalaProductSink.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraScalaProductSink.java
@@ -1,41 +1,41 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.flink.streaming.connectors.cassandra;
-
-
-import scala.Product;
-
-/**
- * Sink to write scala tuples and case classes into a Cassandra cluster.
- *
- * @param <IN> Type of the elements emitted by this sink, it must extend {@link Product}
- */
-public class CassandraScalaProductSink<IN extends Product> extends AbstractCassandraTupleSink<IN> {
-	public CassandraScalaProductSink(String insertQuery, ClusterBuilder builder) {
-		super(insertQuery, builder);
-	}
-
-	@Override
-	protected Object[] extract(IN record) {
-		Object[] al = new Object[record.productArity()];
-		for (int i = 0; i < record.productArity(); i++) {
-			al[i] = record.productElement(i);
-		}
-		return al;
-	}
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.cassandra;
+
+import scala.Product;
+
+/**
+ * Sink to write scala tuples and case classes into a Cassandra cluster.
+ *
+ * @param <IN> Type of the elements emitted by this sink, it must extend {@link Product}
+ */
+public class CassandraScalaProductSink<IN extends Product> extends AbstractCassandraTupleSink<IN> {
+	public CassandraScalaProductSink(String insertQuery, ClusterBuilder builder) {
+		super(insertQuery, builder);
+	}
+
+	@Override
+	protected Object[] extract(IN record) {
+		Object[] al = new Object[record.productArity()];
+		for (int i = 0; i < record.productArity(); i++) {
+			al[i] = record.productElement(i);
+		}
+		return al;
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSink.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSink.java
index 6a33601..af138c5 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSink.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSink.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
-import com.datastax.driver.core.Cluster;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
@@ -32,6 +32,9 @@ import org.apache.flink.streaming.api.operators.ChainingStrategy;
 import org.apache.flink.streaming.api.transformations.SinkTransformation;
 import org.apache.flink.streaming.api.transformations.StreamTransformation;
 import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
+
+import com.datastax.driver.core.Cluster;
+
 import scala.Product;
 
 /**
@@ -79,10 +82,10 @@ public class CassandraSink<IN> {
 
 	/**
 	 * Sets an ID for this operator.
-	 * <p/>
+	 *
 	 * <p>The specified ID is used to assign the same operator ID across job
 	 * submissions (for example when starting a job from a savepoint).
-	 * <p/>
+	 *
 	 * <p><strong>Important</strong>: this ID needs to be unique per
 	 * transformation and job. Otherwise, job submission will fail.
 	 *
@@ -101,19 +104,17 @@ public class CassandraSink<IN> {
 
 	/**
 	 * Sets a user-provided hash for this operator. This will be used AS IS to create the JobVertexID.
-	 * <p/>
+	 *
 	 * <p>The user-provided hash is an alternative to the generated hashes, and is used when identifying an
 	 * operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
-	 * <p/>
+	 *
 	 * <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting. The provided hash
 	 * needs to be unique per transformation and job. Otherwise, job submission will fail. Furthermore, you cannot
 	 * assign user-specified hash to intermediate nodes in an operator chain and trying so will let your job fail.
 	 *
-	 * <p>
-	 * A use case for this is in migration between Flink versions or changing the jobs in a way that changes the
+	 * <p>A use case for this is in migration between Flink versions or changing the jobs in a way that changes the
 	 * automatically generated hashes. In this case, providing the previous hashes directly through this method (e.g.
 	 * obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
-	 * <p/>
 	 *
 	 * @param uidHash The user provided hash for this operator. This will become the JobVertexID, which is shown in the
 	 *                 logs and web ui.
@@ -168,10 +169,10 @@ public class CassandraSink<IN> {
 	 * Sets the slot sharing group of this operation. Parallel instances of
 	 * operations that are in the same slot sharing group will be co-located in the same
 	 * TaskManager slot, if possible.
-	 * <p/>
+	 *
 	 * <p>Operations inherit the slot sharing group of input operations if all input operations
 	 * are in the same slot sharing group and no slot sharing group was explicitly specified.
-	 * <p/>
+	 *
 	 * <p>Initially an operation is in the default slot sharing group. An operation can be put into
 	 * the default group explicitly by setting the slot sharing group to {@code "default"}.
 	 *
@@ -220,6 +221,10 @@ public class CassandraSink<IN> {
 		throw new IllegalArgumentException("No support for the type of the given DataStream: " + input.getType());
 	}
 
+	/**
+	 * Builder for a {@link CassandraSink}.
+	 * @param <IN>
+	 */
 	public abstract static class CassandraSinkBuilder<IN> {
 		protected final DataStream<IN> input;
 		protected final TypeSerializer<IN> serializer;
@@ -327,7 +332,7 @@ public class CassandraSink<IN> {
 				? createWriteAheadSink()
 				: createSink();
 		}
-		
+
 		protected abstract CassandraSink<IN> createSink() throws Exception;
 
 		protected abstract CassandraSink<IN> createWriteAheadSink() throws Exception;
@@ -339,6 +344,10 @@ public class CassandraSink<IN> {
 		}
 	}
 
+	/**
+	 * Builder for a {@link CassandraTupleSink}.
+	 * @param <IN>
+	 */
 	public static class CassandraTupleSinkBuilder<IN extends Tuple> extends CassandraSinkBuilder<IN> {
 		public CassandraTupleSinkBuilder(DataStream<IN> input, TypeInformation<IN> typeInfo, TypeSerializer<IN> serializer) {
 			super(input, typeInfo, serializer);
@@ -365,6 +374,10 @@ public class CassandraSink<IN> {
 		}
 	}
 
+	/**
+	 * Builder for a {@link CassandraPojoSink}.
+	 * @param <IN>
+	 */
 	public static class CassandraPojoSinkBuilder<IN> extends CassandraSinkBuilder<IN> {
 		public CassandraPojoSinkBuilder(DataStream<IN> input, TypeInformation<IN> typeInfo, TypeSerializer<IN> serializer) {
 			super(input, typeInfo, serializer);
@@ -389,6 +402,10 @@ public class CassandraSink<IN> {
 		}
 	}
 
+	/**
+	 * Builder for a {@link CassandraScalaProductSink}.
+	 * @param <IN>
+	 */
 	public static class CassandraScalaProductSinkBuilder<IN extends Product> extends CassandraSinkBuilder<IN> {
 
 		public CassandraScalaProductSinkBuilder(DataStream<IN> input, TypeInformation<IN> typeInfo, TypeSerializer<IN> serializer) {

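Before the next hunk, a brief sketch of the builder API whose javadoc is cleaned up above. This is a hedged illustration, assuming the CassandraSinkBuilder methods setQuery(), setClusterBuilder() and build() behave as documented in this class; the keyspace, table and contact point are placeholders.

	import org.apache.flink.api.java.tuple.Tuple2;
	import org.apache.flink.streaming.api.datastream.DataStream;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.streaming.connectors.cassandra.CassandraSink;
	import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;

	import com.datastax.driver.core.Cluster;

	public class CassandraTupleSinkSketch {

		public static void main(String[] args) throws Exception {
			StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

			DataStream<Tuple2<String, Integer>> source =
					env.fromElements(Tuple2.of("hello", 1), Tuple2.of("world", 2));

			// addSink picks the concrete builder (tuple, pojo, scala product) from the stream type.
			CassandraSink.addSink(source)
					.setQuery("INSERT INTO test.writetuple (element1, element2) VALUES (?, ?);")
					.setClusterBuilder(new ClusterBuilder() {
						@Override
						protected Cluster buildCluster(Cluster.Builder builder) {
							return builder.addContactPoint("127.0.0.1").build();
						}
					})
					.build();

			env.execute("Cassandra tuple sink sketch");
		}
	}
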
http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBase.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBase.java
index b1b261e..5da1f57 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBase.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBase.java
@@ -17,14 +17,15 @@
 
 package org.apache.flink.streaming.connectors.cassandra;
 
+import org.apache.flink.api.java.ClosureCleaner;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
+
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.Session;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.flink.api.java.ClosureCleaner;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -75,7 +76,7 @@ public abstract class CassandraSinkBase<IN, V> extends RichSinkFunction<IN> {
 					}
 				}
 				exception = t;
-				
+
 				log.error("Error while sending value.", t);
 			}
 		};

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java
index a3d002e..fac7b8b 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSink.java
@@ -15,8 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
+import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.java.ClosureCleaner;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.api.java.typeutils.runtime.TupleSerializer;
+import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
+import org.apache.flink.streaming.runtime.operators.GenericWriteAheadSink;
+
 import com.datastax.driver.core.BoundStatement;
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.PreparedStatement;
@@ -25,12 +33,6 @@ import com.datastax.driver.core.ResultSetFuture;
 import com.datastax.driver.core.Session;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import org.apache.flink.api.common.typeutils.TypeSerializer;
-import org.apache.flink.api.java.ClosureCleaner;
-import org.apache.flink.api.java.tuple.Tuple;
-import org.apache.flink.api.java.typeutils.runtime.TupleSerializer;
-import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
-import org.apache.flink.streaming.runtime.operators.GenericWriteAheadSink;
 
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/ClusterBuilder.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/ClusterBuilder.java b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/ClusterBuilder.java
index 9fd3b4e..4dedda4 100644
--- a/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/ClusterBuilder.java
+++ b/flink-connectors/flink-connector-cassandra/src/main/java/org/apache/flink/streaming/connectors/cassandra/ClusterBuilder.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
 import com.datastax.driver.core.Cluster;

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/example/BatchExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/example/BatchExample.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/example/BatchExample.java
index e66b8b3..af21f2d 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/example/BatchExample.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/example/BatchExample.java
@@ -14,10 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.batch.connectors.cassandra.example;
 
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.Cluster.Builder;
 import org.apache.flink.api.common.typeinfo.TypeHint;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
@@ -27,12 +26,15 @@ import org.apache.flink.batch.connectors.cassandra.CassandraInputFormat;
 import org.apache.flink.batch.connectors.cassandra.CassandraOutputFormat;
 import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
 
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.Cluster.Builder;
+
 import java.util.ArrayList;
 
 /**
  * This is an example showing how to use the Cassandra Input-/OutputFormats in the Batch API.
- * 
- * The example assumes that a table exists in a local cassandra database, according to the following query: 
+ *
+ * <p>The example assumes that a table exists in a local cassandra database, according to the following query:
  * CREATE TABLE test.batches (number int, strings text, PRIMARY KEY(number, strings));
  */
 public class BatchExample {

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java
index e6924a3..fe538a8 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java
@@ -18,15 +18,6 @@
 
 package org.apache.flink.streaming.connectors.cassandra;
 
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.QueryOptions;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-
-import org.apache.cassandra.service.CassandraDaemon;
-
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.io.InputFormat;
@@ -47,16 +38,20 @@ import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.runtime.operators.WriteAheadSinkTestBase;
 
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ConsistencyLevel;
+import com.datastax.driver.core.QueryOptions;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+import com.datastax.driver.core.Session;
+import org.apache.cassandra.service.CassandraDaemon;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.JavaConverters;
-import scala.collection.Seq;
 
 import java.io.BufferedWriter;
 import java.io.File;
@@ -69,8 +64,14 @@ import java.util.Random;
 import java.util.Scanner;
 import java.util.UUID;
 
-import static org.junit.Assert.*;
+import scala.collection.JavaConverters;
+import scala.collection.Seq;
+
+import static org.junit.Assert.assertTrue;
 
+/**
+ * IT cases for all cassandra sinks.
+ */
 @SuppressWarnings("serial")
 public class CassandraConnectorITCase extends WriteAheadSinkTestBase<Tuple3<String, Integer, Integer>, CassandraTupleWriteAheadSink<Tuple3<String, Integer, Integer>>> {
 
@@ -138,7 +139,7 @@ public class CassandraConnectorITCase extends WriteAheadSinkTestBase<Tuple3<Stri
 		ClassLoader classLoader = CassandraConnectorITCase.class.getClassLoader();
 		File file = new File(classLoader.getResource("cassandra.yaml").getFile());
 		File tmp = new File(tmpDir.getAbsolutePath() + File.separator + "cassandra.yaml");
-		
+
 		assertTrue(tmp.createNewFile());
 
 		try (
@@ -155,7 +156,6 @@ public class CassandraConnectorITCase extends WriteAheadSinkTestBase<Tuple3<Stri
 			}
 		}
 
-
 		// Tell cassandra where the configuration files are.
 		// Use the test configuration file.
 		System.setProperty("cassandra.config", tmp.getAbsoluteFile().toURI().toString());
@@ -468,11 +468,11 @@ public class CassandraConnectorITCase extends WriteAheadSinkTestBase<Tuple3<Stri
 
 		ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
 		List<Row> rows = rs.all();
-		assertEquals(scalaTupleCollection.size(), rows.size());
+		Assert.assertEquals(scalaTupleCollection.size(), rows.size());
 
 		for (Row row : rows) {
 			scalaTupleCollection.remove(new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
 		}
-		assertEquals(0, scalaTupleCollection.size());
+		Assert.assertEquals(0, scalaTupleCollection.size());
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java
index 847d1a0..06a9335 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java
@@ -15,18 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSetFuture;
-import com.datastax.driver.core.Session;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.java.tuple.Tuple0;
 import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.streaming.runtime.operators.CheckpointCommitter;
 import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSetFuture;
+import com.datastax.driver.core.Session;
 import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
@@ -43,9 +45,12 @@ import static org.powermock.api.mockito.PowerMockito.doAnswer;
 import static org.powermock.api.mockito.PowerMockito.mock;
 import static org.powermock.api.mockito.PowerMockito.when;
 
+/**
+ * Tests for the {@link CassandraTupleWriteAheadSink}.
+ */
 public class CassandraTupleWriteAheadSinkTest {
 
-	@Test(timeout=20000)
+	@Test(timeout = 20000)
 	public void testAckLoopExitOnException() throws Exception {
 		final AtomicReference<Runnable> runnableFuture = new AtomicReference<>();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/Pojo.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/Pojo.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/Pojo.java
index 9b331d6..226043f 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/Pojo.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/Pojo.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra;
 
 import com.datastax.driver.mapping.annotations.Column;
@@ -21,6 +22,9 @@ import com.datastax.driver.mapping.annotations.Table;
 
 import java.io.Serializable;
 
+/**
+ * Test Pojo with DataStax annotations used.
+ */
 @Table(keyspace = "flink", name = "test")
 public class Pojo implements Serializable {
 
@@ -31,12 +35,12 @@ public class Pojo implements Serializable {
 	@Column(name = "counter")
 	private int counter;
 	@Column(name = "batch_id")
-	private int batch_id;
+	private int batchID;
 
-	public Pojo(String id, int counter, int batch_id) {
+	public Pojo(String id, int counter, int batchID) {
 		this.id = id;
 		this.counter = counter;
-		this.batch_id = batch_id;
+		this.batchID = batchID;
 	}
 
 	public String getId() {
@@ -55,11 +59,11 @@ public class Pojo implements Serializable {
 		this.counter = counter;
 	}
 
-	public int getBatch_id() {
-		return batch_id;
+	public int getBatchID() {
+		return batchID;
 	}
 
-	public void setBatch_id(int batch_id) {
-		this.batch_id = batch_id;
+	public void setBatchID(int batchId) {
+		this.batchID = batchId;
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraPojoSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraPojoSinkExample.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraPojoSinkExample.java
index e1bcea9..a38b73b 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraPojoSinkExample.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraPojoSinkExample.java
@@ -17,21 +17,22 @@
 
 package org.apache.flink.streaming.connectors.cassandra.example;
 
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.Cluster.Builder;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.cassandra.CassandraSink;
 import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
 
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.Cluster.Builder;
+
 import java.util.ArrayList;
 
 /**
  * This is an example showing how to use the Pojo Cassandra Sink in the Streaming API.
- * 
- * Pojo's have to be annotated with datastax annotations to work with this sink.
  *
- * The example assumes that a table exists in a local cassandra database, according to the following query:
+ * <p>Pojo's have to be annotated with datastax annotations to work with this sink.
+ *
+ * <p>The example assumes that a table exists in a local cassandra database, according to the following query:
+ * CREATE TABLE IF NOT EXISTS test.message(body text PRIMARY KEY)
  */
 public class CassandraPojoSinkExample {

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleSinkExample.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleSinkExample.java
index c6345df..ce2326f 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleSinkExample.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleSinkExample.java
@@ -14,22 +14,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra.example;
 
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.Cluster.Builder;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.connectors.cassandra.CassandraSink;
 import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
 
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.Cluster.Builder;
+
 import java.util.ArrayList;
 
 /**
  * This is an example showing how to use the Tuple Cassandra Sink in the Streaming API.
  *
- * The example assumes that a table exists in a local cassandra database, according to the following query:
+ * <p>The example assumes that a table exists in a local cassandra database, according to the following query:
  * CREATE TABLE IF NOT EXISTS test.writetuple(element1 text PRIMARY KEY, element2 int)
  */
 public class CassandraTupleSinkExample {
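
A minimal sketch of the wiring, written as the body of a main(String[]) throws Exception and using the imports shown in the diff; the contact point 127.0.0.1 and the generated values are placeholders, and the insert query matches the test.writetuple table from the javadoc:

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	ArrayList<Tuple2<String, Integer>> collection = new ArrayList<>();
	for (int i = 0; i < 20; i++) {
		collection.add(new Tuple2<>("cassandra-" + i, i));
	}
	DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

	CassandraSink.addSink(source)
		.setQuery("INSERT INTO test.writetuple (element1, element2) VALUES (?, ?);")
		.setClusterBuilder(new ClusterBuilder() {
			@Override
			protected Cluster buildCluster(Cluster.Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	env.execute("Cassandra Tuple Sink sketch");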

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleWriteAheadSinkExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleWriteAheadSinkExample.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleWriteAheadSinkExample.java
index 23de949..38618fe 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleWriteAheadSinkExample.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/CassandraTupleWriteAheadSinkExample.java
@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra.example;
 
-import com.datastax.driver.core.Cluster;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.runtime.state.filesystem.FsStateBackend;
@@ -27,6 +27,8 @@ import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.cassandra.CassandraSink;
 import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
 
+import com.datastax.driver.core.Cluster;
+
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
@@ -34,10 +36,10 @@ import java.util.UUID;
 /**
  * This is an example showing how to use the Cassandra Sink (with write-ahead log) in the Streaming API.
  *
- * The example assumes that a table exists in a local cassandra database, according to the following query:
+ * <p>The example assumes that a table exists in a local cassandra database, according to the following query:
  * CREATE TABLE example.values (id text, count int, PRIMARY KEY(id));
- * 
- * Important things to note are that checkpointing is enabled, a StateBackend is set and the enableWriteAheadLog() call
+ *
+ * <p>Important things to note are that checkpointing is enabled, a StateBackend is set and the enableWriteAheadLog() call
  * when creating the CassandraSink.
  */
 public class CassandraTupleWriteAheadSinkExample {
@@ -67,7 +69,7 @@ public class CassandraTupleWriteAheadSinkExample {
 		env.execute();
 	}
 
-	public static class MySource implements SourceFunction<Tuple2<String, Integer>>, ListCheckpointed<Integer> {
+	private static class MySource implements SourceFunction<Tuple2<String, Integer>>, ListCheckpointed<Integer> {
 		private static final long serialVersionUID = 4022367939215095610L;
 
 		private int counter = 0;
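
A minimal sketch of the three pieces that javadoc calls out, written as the body of a main(String[]) throws Exception; the checkpoint interval, the file:///tmp checkpoint path and the contact point are placeholders, and MySource is the checkpointed source shown above:

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(1000); // 1) checkpointing is enabled
	env.setStateBackend(new FsStateBackend("file:///tmp/flink-checkpoints")); // 2) a StateBackend is set
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));

	CassandraSink.addSink(env.addSource(new MySource()))
		.setQuery("INSERT INTO example.values (id, count) VALUES (?, ?);")
		.enableWriteAheadLog() // 3) the write-ahead log is enabled on the sink builder
		.setClusterBuilder(new ClusterBuilder() {
			@Override
			protected Cluster buildCluster(Cluster.Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	env.execute();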

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/Message.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/Message.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/Message.java
index 7524d95..512d0ea 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/Message.java
+++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/example/Message.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.streaming.connectors.cassandra.example;
 
 import com.datastax.driver.mapping.annotations.Column;
@@ -21,6 +22,9 @@ import com.datastax.driver.mapping.annotations.Table;
 
 import java.io.Serializable;
 
+/**
+ * Pojo with DataStax annotations.
+ */
 @Table(keyspace = "test", name = "message")
 public class Message implements Serializable {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/1a3a5b6e/flink-connectors/flink-connector-cassandra/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-cassandra/src/test/resources/log4j-test.properties b/flink-connectors/flink-connector-cassandra/src/test/resources/log4j-test.properties
index a43d556..c1d3cca 100644
--- a/flink-connectors/flink-connector-cassandra/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-connector-cassandra/src/test/resources/log4j-test.properties
@@ -26,4 +26,3 @@ log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
 # suppress the irrelevant (wrong) warnings from the netty channel handler
 log4j.logger.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, testlogger
 
-


[15/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-hadoop-compatibility

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceCombineFunctionITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceCombineFunctionITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceCombineFunctionITCase.java
index 13d971c..b1135f0 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceCombineFunctionITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceCombineFunctionITCase.java
@@ -18,9 +18,6 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-import java.util.Iterator;
-
 import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
@@ -28,6 +25,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction;
 import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction;
 import org.apache.flink.test.util.MultipleProgramsTestBase;
+
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -41,6 +39,12 @@ import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * IT case for the {@link HadoopReduceCombineFunction}.
+ */
 @RunWith(Parameterized.class)
 public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase {
 
@@ -68,7 +72,7 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		counts.writeAsText(resultPath);
 		env.execute();
 
-		String expected = "(0,5)\n"+
+		String expected = "(0,5)\n" +
 				"(1,6)\n" +
 				"(2,6)\n" +
 				"(3,4)\n";
@@ -115,7 +119,7 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		counts.writeAsText(resultPath);
 		env.execute();
 
-		String expected = "(0,5)\n"+
+		String expected = "(0,5)\n" +
 				"(1,6)\n" +
 				"(2,5)\n" +
 				"(3,5)\n";
@@ -144,7 +148,7 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		env.execute();
 
 		// return expected result
-		String expected = "(0,0)\n"+
+		String expected = "(0,0)\n" +
 				"(1,0)\n" +
 				"(2,1)\n" +
 				"(3,1)\n" +
@@ -152,62 +156,71 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 
 		compareResultsByLinesInMemory(expected, resultPath);
 	}
-	
+
+	/**
+	 * A {@link Reducer} to sum counts.
+	 */
 	public static class SumReducer implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
 
 		@Override
 		public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
-			
+
 			int sum = 0;
-			while(v.hasNext()) {
+			while (v.hasNext()) {
 				sum += v.next().get();
 			}
 			out.collect(k, new IntWritable(sum));
 		}
-		
+
 		@Override
 		public void configure(JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * A {@link Reducer} that re-emits each value under its key modulo 4.
+	 */
 	public static class KeyChangingReducer implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
 
 		@Override
 		public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
-			while(v.hasNext()) {
+			while (v.hasNext()) {
 				out.collect(new IntWritable(k.get() % 4), v.next());
 			}
 		}
-		
+
 		@Override
 		public void configure(JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * A {@link Reducer} that counts values starting with a configurable prefix.
+	 */
 	public static class ConfigurableCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {
 		private String countPrefix;
-		
+
 		@Override
 		public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
 			int commentCnt = 0;
-			while(vs.hasNext()) {
+			while (vs.hasNext()) {
 				String v = vs.next().toString();
-				if(v.startsWith(this.countPrefix)) {
+				if (v.startsWith(this.countPrefix)) {
 					commentCnt++;
 				}
 			}
 			out.collect(k, new IntWritable(commentCnt));
 		}
-		
+
 		@Override
-		public void configure(final JobConf c) { 
+		public void configure(final JobConf c) {
 			this.countPrefix = c.get("my.cntPrefix");
 		}
 
@@ -215,10 +228,13 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		public void close() throws IOException { }
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper1 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable,
 			IntWritable>> {
 		private static final long serialVersionUID = 1L;
-		Tuple2<IntWritable,IntWritable> outT = new Tuple2<IntWritable,IntWritable>();
+		Tuple2<IntWritable, IntWritable> outT = new Tuple2<IntWritable, IntWritable>();
 		@Override
 		public Tuple2<IntWritable, IntWritable> map(Tuple2<IntWritable, Text> v)
 		throws Exception {
@@ -228,10 +244,13 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		}
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper2 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable,
 			IntWritable>> {
 		private static final long serialVersionUID = 1L;
-		Tuple2<IntWritable,IntWritable> outT = new Tuple2<IntWritable,IntWritable>();
+		Tuple2<IntWritable, IntWritable> outT = new Tuple2<IntWritable, IntWritable>();
 		@Override
 		public Tuple2<IntWritable, IntWritable> map(Tuple2<IntWritable, Text> v)
 		throws Exception {
@@ -241,9 +260,12 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		}
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper3 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, IntWritable>> {
 		private static final long serialVersionUID = 1L;
-		Tuple2<IntWritable,IntWritable> outT = new Tuple2<IntWritable,IntWritable>();
+		Tuple2<IntWritable, IntWritable> outT = new Tuple2<IntWritable, IntWritable>();
 		@Override
 		public Tuple2<IntWritable, IntWritable> map(Tuple2<IntWritable, Text> v)
 		throws Exception {
@@ -253,6 +275,9 @@ public class HadoopReduceCombineFunctionITCase extends MultipleProgramsTestBase
 		}
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper4 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, Text>> {
 		private static final long serialVersionUID = 1L;
 		@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java
index abc0e9c..3a22af0 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java
@@ -18,15 +18,13 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred;
 
-import java.io.IOException;
-import java.util.Iterator;
-
 import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction;
 import org.apache.flink.test.util.MultipleProgramsTestBase;
+
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -39,6 +37,12 @@ import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * IT cases for the {@link HadoopReduceFunction}.
+ */
 @RunWith(Parameterized.class)
 public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 
@@ -65,7 +69,7 @@ public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 		commentCnts.writeAsText(resultPath);
 		env.execute();
 
-		String expected = "(0,0)\n"+
+		String expected = "(0,0)\n" +
 				"(1,3)\n" +
 				"(2,5)\n" +
 				"(3,5)\n" +
@@ -113,7 +117,7 @@ public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 		helloCnts.writeAsText(resultPath);
 		env.execute();
 
-		String expected = "(0,0)\n"+
+		String expected = "(0,0)\n" +
 				"(1,0)\n" +
 				"(2,1)\n" +
 				"(3,1)\n" +
@@ -121,69 +125,78 @@ public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 
 		compareResultsByLinesInMemory(expected, resultPath);
 	}
-	
+
+	/**
+	 * A {@link Reducer} that counts values starting with "Comment".
+	 */
 	public static class CommentCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {
-		
+
 		@Override
 		public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
 			int commentCnt = 0;
-			while(vs.hasNext()) {
+			while (vs.hasNext()) {
 				String v = vs.next().toString();
-				if(v.startsWith("Comment")) {
+				if (v.startsWith("Comment")) {
 					commentCnt++;
 				}
 			}
 			out.collect(k, new IntWritable(commentCnt));
 		}
-		
+
 		@Override
 		public void configure(final JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * A {@link Reducer} that counts values starting with "Comment" and emits the count under a fixed key.
+	 */
 	public static class AllCommentCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {
-		
+
 		@Override
 		public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
 			int commentCnt = 0;
-			while(vs.hasNext()) {
+			while (vs.hasNext()) {
 				String v = vs.next().toString();
-				if(v.startsWith("Comment")) {
+				if (v.startsWith("Comment")) {
 					commentCnt++;
 				}
 			}
 			out.collect(new IntWritable(42), new IntWritable(commentCnt));
 		}
-		
+
 		@Override
 		public void configure(final JobConf arg0) { }
 
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
+	/**
+	 * A {@link Reducer} that counts values starting with a configurable prefix.
+	 */
 	public static class ConfigurableCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {
 		private String countPrefix;
-		
+
 		@Override
 		public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
 				throws IOException {
 			int commentCnt = 0;
-			while(vs.hasNext()) {
+			while (vs.hasNext()) {
 				String v = vs.next().toString();
-				if(v.startsWith(this.countPrefix)) {
+				if (v.startsWith(this.countPrefix)) {
 					commentCnt++;
 				}
 			}
 			out.collect(k, new IntWritable(commentCnt));
 		}
-		
+
 		@Override
-		public void configure(final JobConf c) { 
+		public void configure(final JobConf c) {
 			this.countPrefix = c.get("my.cntPrefix");
 		}
 
@@ -191,6 +204,9 @@ public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 		public void close() throws IOException { }
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper1 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, Text>> {
 		private static final long serialVersionUID = 1L;
 		@Override
@@ -201,6 +217,9 @@ public class HadoopReduceFunctionITCase extends MultipleProgramsTestBase {
 		}
 	}
 
+	/**
+	 * Test mapper.
+	 */
 	public static class Mapper2 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, Text>> {
 		private static final long serialVersionUID = 1L;
 		@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopTestData.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopTestData.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopTestData.java
index eed6f8f..b1992ff 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopTestData.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopTestData.java
@@ -18,45 +18,49 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple2;
+
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Test data.
+ */
 public class HadoopTestData {
 
 	public static DataSet<Tuple2<IntWritable, Text>> getKVPairDataSet(ExecutionEnvironment env) {
-		
+
 		List<Tuple2<IntWritable, Text>> data = new ArrayList<Tuple2<IntWritable, Text>>();
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(1),new Text("Hi")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(2),new Text("Hello")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(3),new Text("Hello world")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(4),new Text("Hello world, how are you?")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(5),new Text("I am fine.")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(6),new Text("Luke Skywalker")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(7),new Text("Comment#1")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(8),new Text("Comment#2")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(9),new Text("Comment#3")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(10),new Text("Comment#4")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(11),new Text("Comment#5")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(12),new Text("Comment#6")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(13),new Text("Comment#7")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(14),new Text("Comment#8")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(15),new Text("Comment#9")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(16),new Text("Comment#10")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(17),new Text("Comment#11")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(18),new Text("Comment#12")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(19),new Text("Comment#13")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(20),new Text("Comment#14")));
-		data.add(new Tuple2<IntWritable, Text>(new IntWritable(21),new Text("Comment#15")));
-		
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(1), new Text("Hi")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(2), new Text("Hello")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(3), new Text("Hello world")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(4), new Text("Hello world, how are you?")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(5), new Text("I am fine.")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(6), new Text("Luke Skywalker")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(7), new Text("Comment#1")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(8), new Text("Comment#2")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(9), new Text("Comment#3")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(10), new Text("Comment#4")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(11), new Text("Comment#5")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(12), new Text("Comment#6")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(13), new Text("Comment#7")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(14), new Text("Comment#8")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(15), new Text("Comment#9")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(16), new Text("Comment#10")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(17), new Text("Comment#11")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(18), new Text("Comment#12")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(19), new Text("Comment#13")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(20), new Text("Comment#14")));
+		data.add(new Tuple2<IntWritable, Text>(new IntWritable(21), new Text("Comment#15")));
+
 		Collections.shuffle(data);
-		
+
 		return env.fromCollection(data);
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/example/HadoopMapredCompatWordCount.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/example/HadoopMapredCompatWordCount.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/example/HadoopMapredCompatWordCount.java
index ce0143a..2bf69bd 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/example/HadoopMapredCompatWordCount.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/example/HadoopMapredCompatWordCount.java
@@ -18,16 +18,14 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred.example;
 
-import java.io.IOException;
-import java.util.Iterator;
-
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.hadoop.mapred.HadoopInputFormat;
-import org.apache.flink.hadoopcompatibility.mapred.HadoopMapFunction;
 import org.apache.flink.api.java.hadoop.mapred.HadoopOutputFormat;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.hadoopcompatibility.mapred.HadoopMapFunction;
 import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
@@ -39,95 +37,101 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapred.TextOutputFormat;
 
-
+import java.io.IOException;
+import java.util.Iterator;
 
 /**
  * Implements a word count which takes the input file and counts the number of
  * occurrences of each word in the file and writes the result back to disk.
- * 
- * This example shows how to use Hadoop Input Formats, how to convert Hadoop Writables to 
+ *
+ * <p>This example shows how to use Hadoop Input Formats, how to convert Hadoop Writables to
  * common Java types for better usage in a Flink job and how to use Hadoop Output Formats.
  */
 public class HadoopMapredCompatWordCount {
-	
+
 	public static void main(String[] args) throws Exception {
 		if (args.length < 2) {
 			System.err.println("Usage: WordCount <input path> <result path>");
 			return;
 		}
-		
+
 		final String inputPath = args[0];
 		final String outputPath = args[1];
-		
+
 		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-		
+
 		// Set up the Hadoop Input Format
 		HadoopInputFormat<LongWritable, Text> hadoopInputFormat = new HadoopInputFormat<LongWritable, Text>(new TextInputFormat(), LongWritable.class, Text.class, new JobConf());
 		TextInputFormat.addInputPath(hadoopInputFormat.getJobConf(), new Path(inputPath));
-		
+
 		// Create a Flink job with it
 		DataSet<Tuple2<LongWritable, Text>> text = env.createInput(hadoopInputFormat);
-		
-		DataSet<Tuple2<Text, LongWritable>> words = 
+
+		DataSet<Tuple2<Text, LongWritable>> words =
 				text.flatMap(new HadoopMapFunction<LongWritable, Text, Text, LongWritable>(new Tokenizer()))
 					.groupBy(0).reduceGroup(new HadoopReduceCombineFunction<Text, LongWritable, Text, LongWritable>(new Counter(), new Counter()));
-		
+
 		// Set up Hadoop Output Format
-		HadoopOutputFormat<Text, LongWritable> hadoopOutputFormat = 
+		HadoopOutputFormat<Text, LongWritable> hadoopOutputFormat =
 				new HadoopOutputFormat<Text, LongWritable>(new TextOutputFormat<Text, LongWritable>(), new JobConf());
 		hadoopOutputFormat.getJobConf().set("mapred.textoutputformat.separator", " ");
 		TextOutputFormat.setOutputPath(hadoopOutputFormat.getJobConf(), new Path(outputPath));
-		
+
 		// Output & Execute
 		words.output(hadoopOutputFormat).setParallelism(1);
 		env.execute("Hadoop Compat WordCount");
 	}
-	
-	
+
+	/**
+	 * A {@link Mapper} that splits a line into words.
+	 */
 	public static final class Tokenizer implements Mapper<LongWritable, Text, Text, LongWritable> {
 
 		@Override
-		public void map(LongWritable k, Text v, OutputCollector<Text, LongWritable> out, Reporter rep) 
+		public void map(LongWritable k, Text v, OutputCollector<Text, LongWritable> out, Reporter rep)
 				throws IOException {
 			// normalize and split the line
 			String line = v.toString();
 			String[] tokens = line.toLowerCase().split("\\W+");
-			
+
 			// emit the pairs
 			for (String token : tokens) {
 				if (token.length() > 0) {
-					out.collect(new Text(token), new LongWritable(1l));
+					out.collect(new Text(token), new LongWritable(1L));
 				}
 			}
 		}
-		
+
 		@Override
 		public void configure(JobConf arg0) { }
-		
+
 		@Override
 		public void close() throws IOException { }
-		
+
 	}
-	
+
+	/**
+	 * A {@link Reducer} to sum counts.
+	 */
 	public static final class Counter implements Reducer<Text, LongWritable, Text, LongWritable> {
 
 		@Override
 		public void reduce(Text k, Iterator<LongWritable> vs, OutputCollector<Text, LongWritable> out, Reporter rep)
 				throws IOException {
-			
+
 			long cnt = 0;
-			while(vs.hasNext()) {
+			while (vs.hasNext()) {
 				cnt += vs.next().get();
 			}
 			out.collect(k, new LongWritable(cnt));
-			
+
 		}
-		
+
 		@Override
 		public void configure(JobConf arg0) { }
-		
+
 		@Override
 		public void close() throws IOException { }
 	}
-	
+
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIteratorTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIteratorTest.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIteratorTest.java
index 524318c..ff7c1b7 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIteratorTest.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/wrapper/HadoopTupleUnwrappingIteratorTest.java
@@ -18,43 +18,47 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapred.wrapper;
 
-import java.util.ArrayList;
-import java.util.NoSuchElementException;
-
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.runtime.WritableSerializer;
 import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopTupleUnwrappingIterator;
+
 import org.apache.hadoop.io.IntWritable;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.ArrayList;
+import java.util.NoSuchElementException;
+
+/**
+ * Tests for the {@link HadoopTupleUnwrappingIterator}.
+ */
 public class HadoopTupleUnwrappingIteratorTest {
 
 	@Test
 	public void testValueIterator() {
-		
-		HadoopTupleUnwrappingIterator<IntWritable, IntWritable> valIt = 
+
+		HadoopTupleUnwrappingIterator<IntWritable, IntWritable> valIt =
 				new HadoopTupleUnwrappingIterator<IntWritable, IntWritable>(new WritableSerializer
 						<IntWritable>(IntWritable.class));
-		
+
 		// many values
-		
+
 		ArrayList<Tuple2<IntWritable, IntWritable>> tList = new ArrayList<Tuple2<IntWritable, IntWritable>>();
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(1)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(2)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(3)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(4)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(5)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(6)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(7)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1),new IntWritable(8)));
-		
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(1)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(2)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(3)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(4)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(5)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(6)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(7)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(1), new IntWritable(8)));
+
 		int expectedKey = 1;
-		int[] expectedValues = new int[] {1,2,3,4,5,6,7,8};
-		
+		int[] expectedValues = new int[] {1, 2, 3, 4, 5, 6, 7, 8};
+
 		valIt.set(tList.iterator());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		for(int expectedValue : expectedValues) {
+		for (int expectedValue : expectedValues) {
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.next().get() == expectedValue);
@@ -63,18 +67,18 @@ public class HadoopTupleUnwrappingIteratorTest {
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		
+
 		// one value
-		
+
 		tList.clear();
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(2),new IntWritable(10)));
-		
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(2), new IntWritable(10)));
+
 		expectedKey = 2;
 		expectedValues = new int[]{10};
-		
+
 		valIt.set(tList.iterator());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		for(int expectedValue : expectedValues) {
+		for (int expectedValue : expectedValues) {
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.next().get() == expectedValue);
@@ -83,23 +87,23 @@ public class HadoopTupleUnwrappingIteratorTest {
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		
+
 		// more values
-		
+
 		tList.clear();
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3),new IntWritable(10)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3),new IntWritable(4)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3),new IntWritable(7)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3),new IntWritable(9)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(21)));
-		
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3), new IntWritable(10)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3), new IntWritable(4)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3), new IntWritable(7)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(3), new IntWritable(9)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(21)));
+
 		expectedKey = 3;
-		expectedValues = new int[]{10,4,7,9,21};
-		
+		expectedValues = new int[]{10, 4, 7, 9, 21};
+
 		valIt.set(tList.iterator());
 		Assert.assertTrue(valIt.hasNext());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		for(int expectedValue : expectedValues) {
+		for (int expectedValue : expectedValues) {
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.hasNext());
 			Assert.assertTrue(valIt.next().get() == expectedValue);
@@ -108,22 +112,22 @@ public class HadoopTupleUnwrappingIteratorTest {
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		
+
 		// no has next calls
-		
+
 		tList.clear();
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(5)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(8)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(42)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(-1)));
-		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4),new IntWritable(0)));
-		
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(5)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(8)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(42)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(-1)));
+		tList.add(new Tuple2<IntWritable, IntWritable>(new IntWritable(4), new IntWritable(0)));
+
 		expectedKey = 4;
-		expectedValues = new int[]{5,8,42,-1,0};
-		
+		expectedValues = new int[]{5, 8, 42, -1, 0};
+
 		valIt.set(tList.iterator());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
-		for(int expectedValue : expectedValues) {
+		for (int expectedValue : expectedValues) {
 			Assert.assertTrue(valIt.next().get() == expectedValue);
 		}
 		try {
@@ -135,5 +139,5 @@ public class HadoopTupleUnwrappingIteratorTest {
 		Assert.assertFalse(valIt.hasNext());
 		Assert.assertTrue(valIt.getCurrentKey().get() == expectedKey);
 	}
-	
+
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/HadoopInputOutputITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/HadoopInputOutputITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/HadoopInputOutputITCase.java
index 48aa258..a23a50d 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/HadoopInputOutputITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/HadoopInputOutputITCase.java
@@ -18,36 +18,42 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapreduce;
 
+import org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat;
+import org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat;
 import org.apache.flink.test.hadoopcompatibility.mapreduce.example.WordCount;
 import org.apache.flink.test.testdata.WordCountData;
 import org.apache.flink.test.util.JavaProgramTestBase;
 import org.apache.flink.util.OperatingSystem;
+
 import org.junit.Assume;
 import org.junit.Before;
 
+/**
+ * IT cases for both the {@link HadoopInputFormat} and {@link HadoopOutputFormat}.
+ */
 public class HadoopInputOutputITCase extends JavaProgramTestBase {
-	
+
 	protected String textPath;
 	protected String resultPath;
-	
+
 	@Before
 	public void checkOperatingSystem() {
 		// FLINK-5164 - see https://wiki.apache.org/hadoop/WindowsProblems
 		Assume.assumeTrue("This test can't run successfully on Windows.", !OperatingSystem.isWindows());
 	}
-	
+
 	@Override
 	protected void preSubmit() throws Exception {
 		textPath = createTempFile("text.txt", WordCountData.TEXT);
 		resultPath = getTempDirPath("result");
 		this.setParallelism(4);
 	}
-	
+
 	@Override
 	protected void postSubmit() throws Exception {
 		compareResultsByLinesInMemory(WordCountData.COUNTS, resultPath, new String[]{".", "_"});
 	}
-	
+
 	@Override
 	protected void testProgram() throws Exception {
 		WordCount.main(new String[] { textPath, resultPath });

http://git-wip-us.apache.org/repos/asf/flink/blob/fab8fe57/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
index ed83d78..09af3df 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
@@ -18,11 +18,16 @@
 
 package org.apache.flink.test.hadoopcompatibility.mapreduce.example;
 
-import org.apache.flink.api.java.aggregation.Aggregations;
 import org.apache.flink.api.common.functions.RichFlatMapFunction;
 import org.apache.flink.api.common.functions.RichMapFunction;
+import org.apache.flink.api.java.DataSet;
+import org.apache.flink.api.java.ExecutionEnvironment;
+import org.apache.flink.api.java.aggregation.Aggregations;
+import org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat;
+import org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.util.Collector;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
@@ -30,71 +35,67 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.flink.api.java.DataSet;
-import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat;
-import org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat;
 
 /**
  * Implements a word count which takes the input file and counts the number of
  * occurrences of each word in the file and writes the result back to disk.
- * 
- * This example shows how to use Hadoop Input Formats, how to convert Hadoop Writables to 
+ *
+ * <p>This example shows how to use Hadoop Input Formats, how to convert Hadoop Writables to
  * common Java types for better usage in a Flink job and how to use Hadoop Output Formats.
  */
 @SuppressWarnings("serial")
 public class WordCount {
-	
+
 	public static void main(String[] args) throws Exception {
 		if (args.length < 2) {
 			System.err.println("Usage: WordCount <input path> <result path>");
 			return;
 		}
-		
+
 		final String inputPath = args[0];
 		final String outputPath = args[1];
-		
+
 		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-		
+
 		// Set up the Hadoop Input Format
 		Job job = Job.getInstance();
 		HadoopInputFormat<LongWritable, Text> hadoopInputFormat = new HadoopInputFormat<LongWritable, Text>(new TextInputFormat(), LongWritable.class, Text.class, job);
 		TextInputFormat.addInputPath(job, new Path(inputPath));
-		
+
 		// Create a Flink job with it
 		DataSet<Tuple2<LongWritable, Text>> text = env.createInput(hadoopInputFormat);
-		
+
 		// Tokenize the line and convert from Writable "Text" to String for better handling
 		DataSet<Tuple2<String, Integer>> words = text.flatMap(new Tokenizer());
-		
+
 		// Sum up the words
 		DataSet<Tuple2<String, Integer>> result = words.groupBy(0).aggregate(Aggregations.SUM, 1);
-		
+
 		// Convert String back to Writable "Text" for use with Hadoop Output Format
 		DataSet<Tuple2<Text, IntWritable>> hadoopResult = result.map(new HadoopDatatypeMapper());
-		
+
 		// Set up Hadoop Output Format
 		HadoopOutputFormat<Text, IntWritable> hadoopOutputFormat = new HadoopOutputFormat<Text, IntWritable>(new TextOutputFormat<Text, IntWritable>(), job);
 		hadoopOutputFormat.getConfiguration().set("mapreduce.output.textoutputformat.separator", " ");
 		hadoopOutputFormat.getConfiguration().set("mapred.textoutputformat.separator", " "); // set the value for both, since this test
 		TextOutputFormat.setOutputPath(job, new Path(outputPath));
-		
+
 		// Output & Execute
 		hadoopResult.output(hadoopOutputFormat);
 		env.execute("Word Count");
 	}
-	
+
 	/**
 	 * Splits a line into words and converts Hadoop Writables into normal Java data types.
 	 */
 	public static final class Tokenizer extends RichFlatMapFunction<Tuple2<LongWritable, Text>, Tuple2<String, Integer>> {
-		
+
 		@Override
 		public void flatMap(Tuple2<LongWritable, Text> value, Collector<Tuple2<String, Integer>> out) {
 			// normalize and split the line
 			String line = value.f1.toString();
 			String[] tokens = line.toLowerCase().split("\\W+");
-			
+
 			// emit the pairs
 			for (String token : tokens) {
 				if (token.length() > 0) {
@@ -103,17 +104,17 @@ public class WordCount {
 			}
 		}
 	}
-	
+
 	/**
 	 * Converts Java data types to Hadoop Writables.
 	 */
 	public static final class HadoopDatatypeMapper extends RichMapFunction<Tuple2<String, Integer>, Tuple2<Text, IntWritable>> {
-		
+
 		@Override
 		public Tuple2<Text, IntWritable> map(Tuple2<String, Integer> value) throws Exception {
 			return new Tuple2<Text, IntWritable>(new Text(value.f0), new IntWritable(value.f1));
 		}
-		
+
 	}
-	
+
 }


[05/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-hbase

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-hbase


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/43183ad2
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/43183ad2
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/43183ad2

Branch: refs/heads/master
Commit: 43183ad2ca6326dc6021d4f880f5095454c6952d
Parents: 23920bb
Author: zentol <ch...@apache.org>
Authored: Wed May 24 23:12:36 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:09 2017 +0200

----------------------------------------------------------------------
 .../addons/hbase/AbstractTableInputFormat.java  | 17 +++---
 .../flink/addons/hbase/HBaseRowInputFormat.java | 11 ++--
 .../flink/addons/hbase/HBaseTableSchema.java    |  9 +--
 .../flink/addons/hbase/HBaseTableSource.java    | 12 ++--
 .../flink/addons/hbase/TableInputFormat.java    |  5 +-
 .../flink/addons/hbase/TableInputSplit.java     | 12 ++--
 .../addons/hbase/HBaseConnectorITCase.java      | 15 +++--
 .../hbase/HBaseTestingClusterAutostarter.java   |  6 +-
 .../hbase/example/HBaseFlinkTestConstants.java  | 14 ++---
 .../addons/hbase/example/HBaseReadExample.java  | 30 +++++-----
 .../addons/hbase/example/HBaseWriteExample.java | 61 ++++++++++----------
 .../hbase/example/HBaseWriteStreamExample.java  | 11 ++--
 .../src/test/resources/log4j-test.properties    |  4 +-
 13 files changed, 113 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/AbstractTableInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/AbstractTableInputFormat.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/AbstractTableInputFormat.java
index 59ba5b1f..73a21b3 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/AbstractTableInputFormat.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/AbstractTableInputFormat.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.addons.hbase;
 
 import org.apache.flink.api.common.io.InputFormat;
@@ -23,6 +24,7 @@ import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.statistics.BaseStatistics;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.io.InputSplitAssigner;
+
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -32,7 +34,6 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -42,7 +43,7 @@ import java.util.List;
  */
 public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, TableInputSplit> {
 
-	protected static Logger LOG = LoggerFactory.getLogger(AbstractTableInputFormat.class);
+	protected static final Logger LOG = LoggerFactory.getLogger(AbstractTableInputFormat.class);
 
 	// helper variable to decide whether the input is exhausted or not
 	protected boolean endReached = false;
@@ -50,7 +51,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 	protected transient HTable table = null;
 	protected transient Scan scan = null;
 
-	/** HBase iterator wrapper */
+	/** HBase iterator wrapper. */
 	protected ResultScanner resultScanner = null;
 
 	protected byte[] currentRow;
@@ -65,7 +66,8 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 
 	/**
 	 * What table is to be read.
-	 * Per instance of a TableInputFormat derivative only a single table name is possible.
+	 *
+	 * <p>Per instance of a TableInputFormat derivative only a single table name is possible.
 	 *
 	 * @return The name of the table
 	 */
@@ -74,7 +76,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 	/**
 	 * HBase returns an instance of {@link Result}.
 	 *
-	 * This method maps the returned {@link Result} instance into the output type {@link T}.
+	 * <p>This method maps the returned {@link Result} instance into the output type {@link T}.
 	 *
 	 * @param r The Result instance from HBase that needs to be converted
 	 * @return The appropriate instance of {@link T} that contains the data of Result.
@@ -83,10 +85,11 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 
 	/**
 	 * Creates a {@link Scan} object and opens the {@link HTable} connection.
-	 * These are opened here because they are needed in the createInputSplits
+	 *
+	 * <p>These are opened here because they are needed in the createInputSplits
 	 * which is called before the openInputFormat method.
 	 *
-	 * The connection is opened in this method and closed in {@link #closeInputFormat()}.
+	 * <p>The connection is opened in this method and closed in {@link #closeInputFormat()}.
 	 *
 	 * @param parameters The configuration that is to be used
 	 * @see Configuration
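
A minimal sketch of how such an input format is implemented in practice, using the Tuple-specialized TableInputFormat from this module (imports from the HBase client API and org.apache.hadoop.hbase.util.Bytes omitted); the table name "words", the family "cf" and the qualifier "count" are placeholders:

	public class WordCountTableInput extends TableInputFormat<Tuple2<String, Long>> {

		private static final byte[] FAMILY = Bytes.toBytes("cf");
		private static final byte[] QUALIFIER = Bytes.toBytes("count");

		@Override
		protected Scan getScanner() {
			Scan scan = new Scan();
			scan.addColumn(FAMILY, QUALIFIER); // restrict the scan to the single mapped column
			return scan;
		}

		@Override
		protected String getTableName() {
			return "words";
		}

		@Override
		protected Tuple2<String, Long> mapResultToTuple(Result r) {
			// the row key becomes the first field, the cell value the second
			return new Tuple2<>(Bytes.toString(r.getRow()), Bytes.toLong(r.getValue(FAMILY, QUALIFIER)));
		}
	}

Such a format is then consumed with env.createInput(new WordCountTableInput()) on a batch ExecutionEnvironment.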

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseRowInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseRowInputFormat.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseRowInputFormat.java
index fff2a9e..dde24f0 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseRowInputFormat.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseRowInputFormat.java
@@ -24,12 +24,13 @@ import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.types.Row;
+
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -98,7 +99,7 @@ public class HBaseRowInputFormat extends AbstractTableInputFormat<Row> implement
 	public void configure(Configuration parameters) {
 		LOG.info("Initializing HBase configuration.");
 		connectToTable();
-		if(table != null) {
+		if (table != null) {
 			scan = getScanner();
 		}
 
@@ -144,7 +145,7 @@ public class HBaseRowInputFormat extends AbstractTableInputFormat<Row> implement
 				int typeIdx = types[f][q];
 				// read value
 				byte[] value = res.getValue(familyKey, qualifier);
-				if(value != null) {
+				if (value != null) {
 					familyRow.setField(q, deserialize(value, typeIdx));
 				} else {
 					familyRow.setField(q, null);
@@ -164,10 +165,10 @@ public class HBaseRowInputFormat extends AbstractTableInputFormat<Row> implement
 		try {
 			Connection conn = ConnectionFactory.createConnection(conf);
 			super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
-		} catch(TableNotFoundException tnfe) {
+		} catch (TableNotFoundException tnfe) {
 			LOG.error("The table " + tableName + " not found ", tnfe);
 			throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
-		} catch(IOException ioe) {
+		} catch (IOException ioe) {
 			LOG.error("Exception while creating connection to HBase.", ioe);
 			throw new RuntimeException("Cannot create connection to HBase.", ioe);
 		}

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSchema.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSchema.java
index b6b3916..fee9fa9 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSchema.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSchema.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.addons.hbase;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -27,7 +28,7 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 
 /**
- * Helps to specify an HBase Table's schema
+ * Helps to specify an HBase Table's schema.
  */
 public class HBaseTableSchema implements Serializable {
 
@@ -52,7 +53,7 @@ public class HBaseTableSchema implements Serializable {
 
 		if (!HBaseRowInputFormat.isSupportedType(clazz)) {
 			// throw exception
-			throw new IllegalArgumentException("Unsupported class type found " + clazz+". " +
+			throw new IllegalArgumentException("Unsupported class type found " + clazz + ". " +
 				"Better to use byte[].class and deserialize using user defined scalar functions");
 		}
 
@@ -91,7 +92,7 @@ public class HBaseTableSchema implements Serializable {
 
 		byte[][] familyKeys = new byte[this.familyMap.size()][];
 		int i = 0;
-		for(String name : this.familyMap.keySet()) {
+		for (String name : this.familyMap.keySet()) {
 			familyKeys[i++] = name.getBytes(c);
 		}
 		return familyKeys;
@@ -135,7 +136,7 @@ public class HBaseTableSchema implements Serializable {
 
 		byte[][] qualifierKeys = new byte[qualifierMap.size()][];
 		int i = 0;
-		for(String name : qualifierMap.keySet()) {
+		for (String name : qualifierMap.keySet()) {
 			qualifierKeys[i++] = name.getBytes(c);
 		}
 		return qualifierKeys;
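
Both hunks above use the same small pattern: family and qualifier names are kept as Strings in a LinkedHashMap (presumably so the field order of the produced nested Rows stays stable) and are only converted to byte[][] with an explicit Charset when HBase needs raw keys. A generic sketch of that pattern, outside the connector:

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class KeyBytesSketch {

	/** Converts map keys to byte arrays with an explicit charset, preserving insertion order. */
	static byte[][] toByteKeys(Map<String, ?> map, Charset charset) {
		byte[][] keys = new byte[map.size()][];
		int i = 0;
		for (String name : map.keySet()) {
			keys[i++] = name.getBytes(charset);
		}
		return keys;
	}

	public static void main(String[] args) {
		Map<String, Integer> families = new LinkedHashMap<>();
		families.put("someCf", 1);
		families.put("otherCf", 2);
		// An explicit charset avoids depending on the platform default encoding.
		byte[][] keys = toByteKeys(families, StandardCharsets.UTF_8);
		System.out.println(keys.length + " family keys, first has " + keys[0].length + " bytes");
	}
}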

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSource.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSource.java
index f709212..cc7e602 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSource.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/HBaseTableSource.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.addons.hbase;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -25,6 +26,7 @@ import org.apache.flink.table.sources.BatchTableSource;
 import org.apache.flink.table.sources.ProjectableTableSource;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
+
 import org.apache.hadoop.conf.Configuration;
 
 import java.util.Map;
@@ -32,12 +34,12 @@ import java.util.Map;
 /**
  * Creates a TableSource to scan an HBase table.
  *
- * The table name and required HBase configuration is passed during {@link HBaseTableSource} construction.
+ * <p>The table name and required HBase configuration is passed during {@link HBaseTableSource} construction.
  * Use {@link #addColumn(String, String, Class)} to specify the family, qualifier, and type of columns to scan.
  *
- * The TableSource returns {@link Row} with nested Rows for each column family.
+ * <p>The TableSource returns {@link Row} with nested Rows for each column family.
  *
- * The HBaseTableSource is used as shown in the example below.
+ * <p>The HBaseTableSource is used as shown in the example below.
  *
  * <pre>
  * {@code
@@ -112,10 +114,10 @@ public class HBaseTableSource implements BatchTableSource<Row>, ProjectableTable
 		String[] famNames = schema.getFamilyNames();
 		HBaseTableSource newTableSource = new HBaseTableSource(this.conf, tableName);
 		// Extract the family from the given fields
-		for(int field : fields) {
+		for (int field : fields) {
 			String family = famNames[field];
 			Map<String, TypeInformation<?>> familyInfo = schema.getFamilyInfo(family);
-			for(String qualifier : familyInfo.keySet()) {
+			for (String qualifier : familyInfo.keySet()) {
 				// create the newSchema
 				newTableSource.addColumn(family, qualifier, familyInfo.get(qualifier).getTypeClass());
 			}
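
The class javadoc refers to a usage example that this hunk elides. A hedged sketch of what such usage could look like, assuming the Flink 1.3 Table API (TableEnvironment.getTableEnvironment, registerTableSource, sql) and the test table used in the examples below; the table, family, and qualifier names are illustrative:

import org.apache.flink.addons.hbase.HBaseTableSource;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.BatchTableEnvironment;
import org.apache.flink.types.Row;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseTableSourceSketch {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
		BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

		// hbase-site.xml is picked up from the classpath, as in the examples further below.
		Configuration conf = HBaseConfiguration.create();
		HBaseTableSource hbaseSource = new HBaseTableSource(conf, "test-table");
		// One nested Row per family; each addColumn adds a field inside that family's Row.
		hbaseSource.addColumn("someCf", "someQual", String.class);

		tableEnv.registerTableSource("hTable", hbaseSource);
		Table result = tableEnv.sql("SELECT * FROM hTable");
		tableEnv.toDataSet(result, Row.class).print();
	}
}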

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
index 6ea2d04..52fd012 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
@@ -15,17 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.addons.hbase;
 
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.java.tuple.Tuple;
 import org.apache.flink.configuration.Configuration;
+
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 
-
 /**
  * {@link InputFormat} subclass that wraps the access for HTables.
  */
@@ -72,7 +73,7 @@ public abstract class TableInputFormat<T extends Tuple> extends AbstractTableInp
 	}
 
 	/**
-	 * Create an {@link HTable} instance and set it into this format
+	 * Create an {@link HTable} instance and set it into this format.
 	 */
 	private HTable createTable() {
 		LOG.info("Initializing HBaseConfiguration");

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputSplit.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputSplit.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputSplit.java
index 75f0b9b..d265bd4 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputSplit.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputSplit.java
@@ -28,7 +28,7 @@ public class TableInputSplit extends LocatableInputSplit {
 
 	private static final long serialVersionUID = 1L;
 
-	/** The name of the table to retrieve data from */
+	/** The name of the table to retrieve data from. */
 	private final byte[] tableName;
 
 	/** The start row of the split. */
@@ -38,8 +38,8 @@ public class TableInputSplit extends LocatableInputSplit {
 	private final byte[] endRow;
 
 	/**
-	 * Creates a new table input split
-	 * 
+	 * Creates a new table input split.
+	 *
 	 * @param splitNumber
 	 *        the number of the input split
 	 * @param hostnames
@@ -62,7 +62,7 @@ public class TableInputSplit extends LocatableInputSplit {
 
 	/**
 	 * Returns the table name.
-	 * 
+	 *
 	 * @return The table name.
 	 */
 	public byte[] getTableName() {
@@ -71,7 +71,7 @@ public class TableInputSplit extends LocatableInputSplit {
 
 	/**
 	 * Returns the start row.
-	 * 
+	 *
 	 * @return The start row.
 	 */
 	public byte[] getStartRow() {
@@ -80,7 +80,7 @@ public class TableInputSplit extends LocatableInputSplit {
 
 	/**
 	 * Returns the end row.
-	 * 
+	 *
 	 * @return The end row.
 	 */
 	public byte[] getEndRow() {

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
index 33bbe12..5d71ca5 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
@@ -17,6 +17,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.flink.addons.hbase;
 
 import org.apache.flink.api.common.functions.ReduceFunction;
@@ -34,6 +35,7 @@ import org.apache.flink.table.api.java.BatchTableEnvironment;
 import org.apache.flink.table.functions.ScalarFunction;
 import org.apache.flink.test.util.TestBaseUtils;
 import org.apache.flink.types.Row;
+
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -55,7 +57,7 @@ import static org.junit.Assert.assertEquals;
  * - TableInputFormat
  * - HBaseTableSource
  *
- * These tests are located in a single test file to avoided unnecessary initializations of the
+ * <p>These tests are located in a single test file to avoid unnecessary initializations of the
  * HBaseTestingCluster which takes about half a minute.
  *
  */
@@ -290,6 +292,9 @@ public class HBaseConnectorITCase extends HBaseTestingClusterAutostarter {
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
 
+	/**
+	 * A {@link ScalarFunction} that maps byte arrays to UTF-8 strings.
+	 */
 	public static class ToUTF8 extends ScalarFunction {
 
 		public String eval(byte[] bytes) {
@@ -297,6 +302,9 @@ public class HBaseConnectorITCase extends HBaseTestingClusterAutostarter {
 		}
 	}
 
+	/**
+	 * A {@link ScalarFunction} that maps byte arrays to longs.
+	 */
 	public static class ToLong extends ScalarFunction {
 
 		public long eval(byte[] bytes) {
@@ -342,16 +350,15 @@ public class HBaseConnectorITCase extends HBaseTestingClusterAutostarter {
 		List<Tuple1<Integer>> resultSet = result.collect();
 
 		assertEquals(1, resultSet.size());
-		assertEquals(360, (int)resultSet.get(0).f0);
+		assertEquals(360, (int) resultSet.get(0).f0);
 	}
 
-
 	/**
 	 * Allows the tests to use {@link ExecutionEnvironment#getExecutionEnvironment()} but with a
 	 * configuration that limits the maximum memory used for network buffers since the current
 	 * defaults are too high for Travis-CI.
 	 */
-	private static abstract class LimitNetworkBuffersTestEnvironment extends ExecutionEnvironment {
+	private abstract static class LimitNetworkBuffersTestEnvironment extends ExecutionEnvironment {
 
 		public static void setAsContext() {
 			Configuration config = new Configuration();
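
ToUTF8 and ToLong above follow the advice from HBaseTableSchema: scan columns as raw byte[] and decode them in the query with user-defined scalar functions. A hedged sketch of how such functions might be registered and used; the registered table name, family, and qualifiers are placeholders, and dotted access to the nested family Rows in SQL is assumed:

import org.apache.flink.addons.hbase.HBaseConnectorITCase;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.BatchTableEnvironment;

public class ScalarFunctionUsageSketch {

	/**
	 * Registers the byte[]-decoding functions and uses them in a SQL query.
	 *
	 * <p>Assumes "hTable" is an already registered HBaseTableSource whose family
	 * "someCf" exposes raw byte[] qualifiers "someQual" and "someCount".
	 */
	public static Table decode(BatchTableEnvironment tableEnv) {
		tableEnv.registerFunction("toUTF8", new HBaseConnectorITCase.ToUTF8());
		tableEnv.registerFunction("toLong", new HBaseConnectorITCase.ToLong());
		return tableEnv.sql(
			"SELECT toUTF8(h.someCf.someQual), toLong(h.someCf.someCount) FROM hTable AS h");
	}
}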

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
index 727a5b1..e4b2bd2 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
@@ -20,10 +20,11 @@
 
 package org.apache.flink.addons.hbase;
 
+import org.apache.flink.util.TestLogger;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.flink.util.TestLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -95,7 +96,7 @@ public class HBaseTestingClusterAutostarter extends TestLogger implements Serial
 
 		assertNotNull("HBaseAdmin is not initialized successfully.", admin);
 		HTableDescriptor desc = new HTableDescriptor(tableName);
-		for(byte[] fam : columnFamilyName) {
+		for (byte[] fam : columnFamilyName) {
 			HColumnDescriptor colDef = new HColumnDescriptor(fam);
 			desc.addFamily(colDef);
 		}
@@ -195,6 +196,7 @@ public class HBaseTestingClusterAutostarter extends TestLogger implements Serial
 	public static Configuration getConf() {
 		return conf;
 	}
+
 	private static void createHBaseSiteXml(File hbaseSiteXmlDirectory, String zookeeperQuorum) {
 		hbaseSiteXmlFile = new File(hbaseSiteXmlDirectory, "hbase-site.xml");
 		// Create the hbase-site.xml file for this run.
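
The createTable() hunk above builds the table from an HTableDescriptor plus one HColumnDescriptor per family. For reference, the equivalent standalone HBase admin call looks roughly like the sketch below (illustrative names; not the test harness itself):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTestTableSketch {

	public static void main(String[] args) throws Exception {
		try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
				Admin admin = conn.getAdmin()) {
			HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("test-table"));
			desc.addFamily(new HColumnDescriptor("someCf"));
			if (!admin.tableExists(desc.getTableName())) {
				admin.createTable(desc);
			}
		}
	}
}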

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseFlinkTestConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseFlinkTestConstants.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseFlinkTestConstants.java
index f56295e..57224c2 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseFlinkTestConstants.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseFlinkTestConstants.java
@@ -20,11 +20,11 @@ package org.apache.flink.addons.hbase.example;
 
 import org.apache.flink.configuration.ConfigConstants;
 
-public class HBaseFlinkTestConstants {
-	
-	public static final byte[] CF_SOME = "someCf".getBytes(ConfigConstants.DEFAULT_CHARSET);
-	public static final byte[] Q_SOME = "someQual".getBytes(ConfigConstants.DEFAULT_CHARSET);
-	public static final String TEST_TABLE_NAME = "test-table";
-	public static final String TMP_DIR = "/tmp/test";
-	
+class HBaseFlinkTestConstants {
+
+	static final byte[] CF_SOME = "someCf".getBytes(ConfigConstants.DEFAULT_CHARSET);
+	static final byte[] Q_SOME = "someQual".getBytes(ConfigConstants.DEFAULT_CHARSET);
+	static final String TEST_TABLE_NAME = "test-table";
+	static final String TMP_DIR = "/tmp/test";
+
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseReadExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseReadExample.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseReadExample.java
index dccf876..817ae09 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseReadExample.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseReadExample.java
@@ -23,31 +23,32 @@ import org.apache.flink.api.common.functions.FilterFunction;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple2;
+
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Simple stub for HBase DataSet read
- * 
- * To run the test first create the test table with hbase shell.
- * 
- * Use the following commands:
+ *
+ * <p>To run the test first create the test table with hbase shell.
+ *
+ * <p>Use the following commands:
  * <ul>
  *     <li>create 'test-table', 'someCf'</li>
  *     <li>put 'test-table', '1', 'someCf:someQual', 'someString'</li>
  *     <li>put 'test-table', '2', 'someCf:someQual', 'anotherString'</li>
  * </ul>
- * 
- * The test should return just the first entry.
- * 
+ *
+ * <p>The test should return just the first entry.
+ *
  */
 public class HBaseReadExample {
 	public static void main(String[] args) throws Exception {
 		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
 		@SuppressWarnings("serial")
 		DataSet<Tuple2<String, String>> hbaseDs = env.createInput(new TableInputFormat<Tuple2<String, String>>() {
-			
+
 				@Override
 				public String getTableName() {
 					return HBaseFlinkTestConstants.TEST_TABLE_NAME;
@@ -61,7 +62,7 @@ public class HBaseReadExample {
 				}
 
 				private Tuple2<String, String> reuse = new Tuple2<String, String>();
-				
+
 				@Override
 				protected Tuple2<String, String> mapResultToTuple(Result r) {
 					String key = Bytes.toString(r.getRow());
@@ -71,22 +72,23 @@ public class HBaseReadExample {
 					return reuse;
 				}
 		})
-		.filter(new FilterFunction<Tuple2<String,String>>() {
+		.filter(new FilterFunction<Tuple2<String, String>>() {
 
 			@Override
 			public boolean filter(Tuple2<String, String> t) throws Exception {
 				String val = t.getField(1);
-				if(val.startsWith("someStr"))
+				if (val.startsWith("someStr")) {
 					return true;
+				}
 				return false;
 			}
 		});
-		
+
 		hbaseDs.print();
-		
+
 		// kick off execution.
 		env.execute();
-				
+
 	}
 
 }
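
The anonymous TableInputFormat subclass above also overrides getScanner(), which the excerpt elides. Pulled out into a named class, a minimal equivalent could look like the following sketch; the table and column names are inlined from HBaseFlinkTestConstants, and the protected signatures are assumed from the abstract TableInputFormat:

import org.apache.flink.addons.hbase.TableInputFormat;
import org.apache.flink.api.java.tuple.Tuple1;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

/** A sketch of a minimal TableInputFormat subclass that reads only the example column. */
public class SingleColumnInputFormat extends TableInputFormat<Tuple1<String>> {

	private final Tuple1<String> reuse = new Tuple1<String>();

	@Override
	public String getTableName() {
		return "test-table";
	}

	@Override
	protected Scan getScanner() {
		Scan scan = new Scan();
		// Restrict the scan to the single column the format actually maps.
		scan.addColumn(Bytes.toBytes("someCf"), Bytes.toBytes("someQual"));
		return scan;
	}

	@Override
	protected Tuple1<String> mapResultToTuple(Result result) {
		reuse.f0 = Bytes.toString(result.getValue(Bytes.toBytes("someCf"), Bytes.toBytes("someQual")));
		return reuse;
	}
}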

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteExample.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteExample.java
index 64d20c3..ca82392 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteExample.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteExample.java
@@ -27,6 +27,7 @@ import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.util.Collector;
+
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
@@ -36,36 +37,36 @@ import org.apache.hadoop.mapreduce.Job;
 
 /**
  * Simple stub for HBase DataSet write
- * 
- * To run the test first create the test table with hbase shell.
- * 
- * Use the following commands:
+ *
+ * <p>To run the test first create the test table with hbase shell.
+ *
+ * <p>Use the following commands:
  * <ul>
  *     <li>create 'test-table', 'someCf'</li>
  * </ul>
- * 
+ *
  */
 @SuppressWarnings("serial")
 public class HBaseWriteExample {
-	
+
 	// *************************************************************************
 	//     PROGRAM
 	// *************************************************************************
-	
+
 	public static void main(String[] args) throws Exception {
 
-		if(!parseParameters(args)) {
+		if (!parseParameters(args)) {
 			return;
 		}
-		
+
 		// set up the execution environment
 		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-		
+
 		// get input data
 		DataSet<String> text = getTextDataSet(env);
-		
-		DataSet<Tuple2<String, Integer>> counts = 
-				// split up the lines in pairs (2-tuples) containing: (word,1)
+
+		DataSet<Tuple2<String, Integer>> counts =
+				// split up the lines in pairs (2-tuples) containing: (word, 1)
 				text.flatMap(new Tokenizer())
 				// group by the tuple field "0" and sum up tuple field "1"
 				.groupBy(0)
@@ -75,8 +76,8 @@ public class HBaseWriteExample {
 		Job job = Job.getInstance();
 		job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, outputTableName);
 		// TODO is "mapred.output.dir" really useful?
-		job.getConfiguration().set("mapred.output.dir",HBaseFlinkTestConstants.TMP_DIR);
-		counts.map(new RichMapFunction <Tuple2<String,Integer>, Tuple2<Text,Mutation>>() {
+		job.getConfiguration().set("mapred.output.dir", HBaseFlinkTestConstants.TMP_DIR);
+		counts.map(new RichMapFunction <Tuple2<String, Integer>, Tuple2<Text, Mutation>>() {
 			private transient Tuple2<Text, Mutation> reuse;
 
 			@Override
@@ -89,24 +90,24 @@ public class HBaseWriteExample {
 			public Tuple2<Text, Mutation> map(Tuple2<String, Integer> t) throws Exception {
 				reuse.f0 = new Text(t.f0);
 				Put put = new Put(t.f0.getBytes(ConfigConstants.DEFAULT_CHARSET));
-				put.add(HBaseFlinkTestConstants.CF_SOME,HBaseFlinkTestConstants.Q_SOME, Bytes.toBytes(t.f1));
+				put.add(HBaseFlinkTestConstants.CF_SOME, HBaseFlinkTestConstants.Q_SOME, Bytes.toBytes(t.f1));
 				reuse.f1 = put;
 				return reuse;
 			}
 		}).output(new HadoopOutputFormat<Text, Mutation>(new TableOutputFormat<Text>(), job));
-		
+
 		// execute program
 		env.execute("WordCount (HBase sink) Example");
 	}
-	
+
 	// *************************************************************************
 	//     USER FUNCTIONS
 	// *************************************************************************
-	
+
 	/**
 	 * Implements the string tokenizer that splits sentences into words as a user-defined
-	 * FlatMapFunction. The function takes a line (String) and splits it into 
-	 * multiple pairs in the form of "(word,1)" (Tuple2<String, Integer>).
+	 * FlatMapFunction. The function takes a line (String) and splits it into
+	 * multiple pairs in the form of "(word, 1)" (Tuple2&lt;String, Integer&gt;).
 	 */
 	public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
 
@@ -114,7 +115,7 @@ public class HBaseWriteExample {
 		public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
 			// normalize and split the line
 			String[] tokens = value.toLowerCase().split("\\W+");
-			
+
 			// emit the pairs
 			for (String token : tokens) {
 				if (token.length() > 0) {
@@ -123,20 +124,20 @@ public class HBaseWriteExample {
 			}
 		}
 	}
-	
+
 	// *************************************************************************
 	//     UTIL METHODS
 	// *************************************************************************
 	private static boolean fileOutput = false;
 	private static String textPath;
 	private static String outputTableName = HBaseFlinkTestConstants.TEST_TABLE_NAME;
-	
+
 	private static boolean parseParameters(String[] args) {
-		
-		if(args.length > 0) {
+
+		if (args.length > 0) {
 			// parse input arguments
 			fileOutput = true;
-			if(args.length == 2) {
+			if (args.length == 2) {
 				textPath = args[0];
 				outputTableName = args[1];
 			} else {
@@ -150,9 +151,9 @@ public class HBaseWriteExample {
 		}
 		return true;
 	}
-	
+
 	private static DataSet<String> getTextDataSet(ExecutionEnvironment env) {
-		if(fileOutput) {
+		if (fileOutput) {
 			// read the text file from given input path
 			return env.readTextFile(textPath);
 		} else {
@@ -160,9 +161,11 @@ public class HBaseWriteExample {
 			return getDefaultTextLineDataSet(env);
 		}
 	}
+
 	private static DataSet<String> getDefaultTextLineDataSet(ExecutionEnvironment env) {
 		return env.fromElements(WORDS);
 	}
+
 	private static final String[] WORDS = new String[] {
 		"To be, or not to be,--that is the question:--",
 		"Whether 'tis nobler in the mind to suffer",

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteStreamExample.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteStreamExample.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteStreamExample.java
index 05398db..1ed471d 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteStreamExample.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/example/HBaseWriteStreamExample.java
@@ -17,27 +17,26 @@
 
 package org.apache.flink.addons.hbase.example;
 
-import java.io.IOException;
-
 import org.apache.flink.api.common.io.OutputFormat;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
+
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import java.io.IOException;
+
 /**
- * 
  * This is an example of how to write streams into HBase. In this example the
  * stream will be written into a local HBase, but it is possible to adapt this
  * example for an HBase running in a cloud. You need a running local HBase with a
  * table "flinkExample" and a column "entry". If your HBase configuration does
  * not match the hbase-site.xml in the resource folder, you have to temporarily delete this
  * hbase-site.xml to execute the example properly.
- * 
  */
 public class HBaseWriteStreamExample {
 
@@ -70,9 +69,7 @@ public class HBaseWriteStreamExample {
 	}
 
 	/**
-	 * 
-	 * This class implements an OutputFormat for HBase
-	 *
+	 * This class implements an OutputFormat for HBase.
 	 */
 	private static class HBaseOutputFormat implements OutputFormat<String> {
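
The body of HBaseOutputFormat is elided from this hunk. A hedged sketch of what an OutputFormat<String> writing into the "flinkExample" table described in the javadoc could look like, using only classes the example already imports (not necessarily the committed implementation):

import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.configuration.Configuration;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

/** A sketch of an OutputFormat that writes each String record as one HBase Put. */
public class HBaseOutputFormatSketch implements OutputFormat<String> {

	private static final long serialVersionUID = 1L;

	private transient HTable table;
	private long rowNumber = 0;

	@Override
	public void configure(Configuration parameters) {
		// nothing to configure; hbase-site.xml is read from the classpath in open()
	}

	@Override
	public void open(int taskNumber, int numTasks) throws IOException {
		table = new HTable(HBaseConfiguration.create(), "flinkExample");
	}

	@Override
	public void writeRecord(String record) throws IOException {
		Put put = new Put(Bytes.toBytes("row-" + rowNumber++));
		put.add(Bytes.toBytes("entry"), Bytes.toBytes("entry"), Bytes.toBytes(record));
		table.put(put);
	}

	@Override
	public void close() throws IOException {
		table.flushCommits();
		table.close();
	}
}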
 

http://git-wip-us.apache.org/repos/asf/flink/blob/43183ad2/flink-connectors/flink-hbase/src/test/resources/log4j-test.properties
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/resources/log4j-test.properties b/flink-connectors/flink-hbase/src/test/resources/log4j-test.properties
index 804ff45..25dd575 100644
--- a/flink-connectors/flink-hbase/src/test/resources/log4j-test.properties
+++ b/flink-connectors/flink-hbase/src/test/resources/log4j-test.properties
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 # http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY


[21/21] flink git commit: [FLINK-6711] Activate strict checkstyle for flink-connectors

Posted by ch...@apache.org.
[FLINK-6711] Activate strict checkstyle for flink-connectors

This closes #3992.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4f50dc4d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4f50dc4d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4f50dc4d

Branch: refs/heads/master
Commit: 4f50dc4df14d00203a03873edaf4252082ef4d38
Parents: b58545e
Author: zentol <ch...@apache.org>
Authored: Thu May 25 18:52:40 2017 +0200
Committer: zentol <ch...@apache.org>
Committed: Sun May 28 00:11:50 2017 +0200

----------------------------------------------------------------------
 flink-connectors/pom.xml | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/4f50dc4d/flink-connectors/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/pom.xml b/flink-connectors/pom.xml
index 5af85b8..cbb48e0 100644
--- a/flink-connectors/pom.xml
+++ b/flink-connectors/pom.xml
@@ -101,5 +101,44 @@ under the License.
 			</modules>
 		</profile>
 	</profiles>
+	
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-checkstyle-plugin</artifactId>
+				<version>2.17</version>
+				<dependencies>
+					<dependency>
+						<groupId>com.puppycrawl.tools</groupId>
+						<artifactId>checkstyle</artifactId>
+						<version>6.19</version>
+					</dependency>
+				</dependencies>
+				<configuration>
+					<configLocation>/tools/maven/strict-checkstyle.xml</configLocation>
+					<suppressionsLocation>/tools/maven/suppressions.xml</suppressionsLocation>
+					<includeTestSourceDirectory>true</includeTestSourceDirectory>
+					<logViolationsToConsole>true</logViolationsToConsole>
+					<failOnViolation>true</failOnViolation>
+				</configuration>
+				<executions>
+					<!--
+					Execute checkstyle after compilation but before tests.
+
+					This ensures that any parsing or type checking errors are from
+					javac, so they look as expected. Beyond that, we want to
+					fail as early as possible.
+					-->
+					<execution>
+						<phase>test-compile</phase>
+						<goals>
+							<goal>check</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
 
 </project>
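
The mechanical edits across this series come from a handful of rules that the strict profile enabled above enforces: a space after control-flow keywords, braces around single-statement if bodies, a period after the first javadoc sentence, <p> tags before later javadoc paragraphs, no trailing whitespace, "abstract static" modifier order, and Flink imports grouped ahead of third-party and JDK imports. A short illustrative class (not part of the commit) that follows those rules:

import org.apache.flink.configuration.Configuration;

import org.apache.hadoop.hbase.util.Bytes;

import java.util.Arrays;

/**
 * Illustrates the formatting conventions enforced by the strict checkstyle profile.
 *
 * <p>Javadoc paragraphs after the first one start with a paragraph tag, and the
 * first sentence ends with a period.
 */
public class CheckstyleConventions {

	private abstract static class Encoder {
		abstract byte[] encode(String value);
	}

	public static void main(String[] args) {
		Configuration config = new Configuration();
		byte[] bytes = Bytes.toBytes("value");
		if (args.length > 0) {
			// a space after 'if' and mandatory braces, even for a single statement
			System.out.println(Arrays.toString(bytes) + " " + config);
		}
	}
}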