Posted to commits@flink.apache.org by tz...@apache.org on 2018/01/12 13:30:53 UTC

[01/19] flink git commit: [FLINK-8199] [elasticsearch] Properly annotate APIs of Elasticsearch connector

Repository: flink
Updated Branches:
  refs/heads/master 06922753a -> 4ade82631


[FLINK-8199] [elasticsearch] Properly annotate APIs of Elasticsearch connector

This closes #5124.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/9b5fce6b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/9b5fce6b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/9b5fce6b

Branch: refs/heads/master
Commit: 9b5fce6b1d55205054abbdf274df7af72d1fd263
Parents: 5a318de
Author: zhangminglei <zm...@163.com>
Authored: Mon Dec 18 15:52:03 2017 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../connectors/elasticsearch/ActionRequestFailureHandler.java     | 3 +++
 .../streaming/connectors/elasticsearch/BulkProcessorIndexer.java  | 3 +++
 .../connectors/elasticsearch/ElasticsearchApiCallBridge.java      | 3 +++
 .../streaming/connectors/elasticsearch/ElasticsearchSinkBase.java | 2 ++
 .../connectors/elasticsearch/ElasticsearchSinkFunction.java       | 2 ++
 .../flink/streaming/connectors/elasticsearch/RequestIndexer.java  | 3 +++
 .../connectors/elasticsearch/util/ElasticsearchUtils.java         | 3 +++
 .../connectors/elasticsearch/util/NoOpFailureHandler.java         | 2 ++
 .../elasticsearch/util/RetryRejectedExecutionFailureHandler.java  | 2 ++
 .../connectors/elasticsearch/Elasticsearch1ApiCallBridge.java     | 2 ++
 .../streaming/connectors/elasticsearch/ElasticsearchSink.java     | 2 ++
 .../elasticsearch/IndexRequestBuilderWrapperFunction.java         | 2 ++
 .../connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java    | 2 ++
 .../streaming/connectors/elasticsearch2/ElasticsearchSink.java    | 2 ++
 .../elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java     | 2 ++
 .../connectors/elasticsearch2/OldNewRequestIndexerBridge.java     | 3 +++
 .../connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java    | 2 ++
 .../streaming/connectors/elasticsearch5/ElasticsearchSink.java    | 2 ++
 18 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
index 3ca1417..260f80e 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import org.elasticsearch.action.ActionRequest;
 
 import java.io.Serializable;
@@ -56,6 +58,7 @@ import java.io.Serializable;
  * could not be retrieved through the older version Java client APIs (thus, the types will be general {@link Exception}s
  * and only differ in the failure message). In this case, it is recommended to match on the provided REST status code.
  */
+@PublicEvolving
 public interface ActionRequestFailureHandler extends Serializable {
 
 	/**
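
For reference (not part of this commit), a minimal sketch of a custom failure handler against the interface annotated above. It assumes this connector version's onFailure(ActionRequest, Throwable, int restStatusCode, RequestIndexer) signature; the 404 policy and class name are illustrative only.

import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

// Sketch: re-queue requests rejected because node queues were full, drop
// requests that failed with a 404, and fail the sink for everything else.
public class ExampleFailureHandler implements ActionRequestFailureHandler {

	private static final long serialVersionUID = 1L;

	@Override
	public void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
		if (failure instanceof EsRejectedExecutionException) {
			// full node queues; buffer the request so it is retried
			indexer.add(action);
		} else if (restStatusCode == 404) {
			// illustrative policy: drop requests for missing indices
		} else {
			// rethrow to fail the sink and trigger job recovery
			throw failure;
		}
	}
}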

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
index 3e290ff..2ebb97c 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.Internal;
+
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
 
@@ -29,6 +31,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}.
  * {@link ActionRequest ActionRequests} will be buffered before sending a bulk request to the Elasticsearch cluster.
  */
+@Internal
 class BulkProcessorIndexer implements RequestIndexer {
 
 	private final BulkProcessor bulkProcessor;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index ce98dfb..2a7a216 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.Internal;
+
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.client.Client;
@@ -36,6 +38,7 @@ import java.util.Map;
  * is allowed, the call bridge will hold reference to the created embedded node. Each instance of the sink will hold
  * exactly one instance of the call bridge, and state cleanup is performed when the sink is closed.
  */
+@Internal
 public interface ElasticsearchApiCallBridge extends Serializable {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index fe4343f..d3e0e87 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.configuration.Configuration;
@@ -61,6 +62,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * @param <T> Type of the elements handled by this sink
  */
+@Internal
 public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> implements CheckpointedFunction {
 
 	private static final long serialVersionUID = -1007596293618451942L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
index 8248204..1b5ce1e 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.functions.Function;
 import org.apache.flink.api.common.functions.RuntimeContext;
 
@@ -56,6 +57,7 @@ import java.io.Serializable;
  *
  * @param <T> The type of the element handled by this {@code ElasticsearchSinkFunction}
  */
+@PublicEvolving
 public interface ElasticsearchSinkFunction<T> extends Serializable, Function {
 
 	/**
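
For reference (not part of this commit), a minimal sketch of a sink function implementing the interface annotated above; the index and type names are placeholders.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.HashMap;
import java.util.Map;

// Sketch: index each string element as a one-field JSON document.
public class ExampleSinkFunction implements ElasticsearchSinkFunction<String> {

	private static final long serialVersionUID = 1L;

	private IndexRequest createIndexRequest(String element) {
		Map<String, String> json = new HashMap<>();
		json.put("data", element);
		return Requests.indexRequest()
				.index("my-index")   // placeholder index name
				.type("my-type")     // placeholder type name
				.source(json);
	}

	@Override
	public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
		indexer.add(createIndexRequest(element));
	}
}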

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
index cfa166e..2a1b297 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
@@ -18,12 +18,15 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import org.elasticsearch.action.ActionRequest;
 
 /**
  * Users add multiple {@link ActionRequest ActionRequests} to a {@link RequestIndexer} to prepare
  * them for sending to an Elasticsearch cluster.
  */
+@PublicEvolving
 public interface RequestIndexer {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/ElasticsearchUtils.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/ElasticsearchUtils.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/ElasticsearchUtils.java
index 9776c4c..11eede4 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/ElasticsearchUtils.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/ElasticsearchUtils.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch.util;
 
+import org.apache.flink.annotation.Internal;
+
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 
@@ -28,6 +30,7 @@ import java.util.List;
 /**
  * Suite of utility methods for Elasticsearch.
  */
+@Internal
 public class ElasticsearchUtils {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
index dffee20..dfcb9ee 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch.util;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 
@@ -25,6 +26,7 @@ import org.elasticsearch.action.ActionRequest;
 /**
  * An {@link ActionRequestFailureHandler} that simply fails the sink on any failures.
  */
+@Internal
 public class NoOpFailureHandler implements ActionRequestFailureHandler {
 
 	private static final long serialVersionUID = 737941343410827885L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
index 3706257..ca710cb 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch.util;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 import org.apache.flink.util.ExceptionUtils;
@@ -30,6 +31,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
  * {@link EsRejectedExecutionException}s (which means that Elasticsearch node queues are currently full),
  * and fails for all other failures.
  */
+@PublicEvolving
 public class RetryRejectedExecutionFailureHandler implements ActionRequestFailureHandler {
 
 	private static final long serialVersionUID = -7423562912824511906L;
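
A usage sketch (not part of this commit), assuming the (config, transport addresses, sink function, failure handler) constructor of the Elasticsearch 2.x/5.x sink; the cluster name, address, and sink function are placeholders.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.util.RetryRejectedExecutionFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch5.ElasticsearchSink;

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RetryHandlerUsage {

	// Attach an Elasticsearch 5.x sink that retries queue rejections and
	// fails the sink on all other errors.
	public static void attachSink(DataStream<String> stream, ElasticsearchSinkFunction<String> sinkFunction) throws Exception {
		Map<String, String> config = new HashMap<>();
		config.put("cluster.name", "my-cluster"); // placeholder cluster name

		List<InetSocketAddress> transports = new ArrayList<>();
		transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

		stream.addSink(new ElasticsearchSink<>(config, transports, sinkFunction,
				new RetryRejectedExecutionFailureHandler()));
	}
}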

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
index 5659ee6..2a3c2a0 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.Preconditions;
 
 import org.elasticsearch.action.bulk.BulkItemResponse;
@@ -40,6 +41,7 @@ import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
 /**
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 1.x.
  */
+@Internal
 public class Elasticsearch1ApiCallBridge implements ElasticsearchApiCallBridge {
 
 	private static final long serialVersionUID = -2632363720584123682L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
index bc5ac84..9dd8209 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
 
 import org.elasticsearch.action.ActionRequest;
@@ -62,6 +63,7 @@ import java.util.Map;
  *
  * @param <T> Type of the elements handled by this sink
  */
+@PublicEvolving
 public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
 
 	private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilderWrapperFunction.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilderWrapperFunction.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilderWrapperFunction.java
index 6f1d138..18a723b 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilderWrapperFunction.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/IndexRequestBuilderWrapperFunction.java
@@ -18,12 +18,14 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.functions.RuntimeContext;
 
 /**
  * A dummy {@link ElasticsearchSinkFunction} that wraps a {@link IndexRequestBuilder}.
  * This serves as a bridge for the usage deprecation of the {@code IndexRequestBuilder} interface.
  */
+@Internal
 class IndexRequestBuilderWrapperFunction<T> implements ElasticsearchSinkFunction<T> {
 
 	private static final long serialVersionUID = 289876038414250101L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
index 66b676c..390a407 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.ElasticsearchUtils;
@@ -42,6 +43,7 @@ import java.util.Map;
 /**
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 2.x.
  */
+@Internal
 public class Elasticsearch2ApiCallBridge implements ElasticsearchApiCallBridge {
 
 	private static final long serialVersionUID = 2638252694744361079L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
index 0c991a6..a17b4d8 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
@@ -56,6 +57,7 @@ import java.util.Map;
  *
  * @param <T> Type of the elements handled by this sink
  */
+@PublicEvolving
 public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
 
 	private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java
index c95fff5..0dc5c28 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewElasticsearchSinkFunctionBridge.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 
@@ -24,6 +25,7 @@ import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
  * A dummy {@link org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction} to bridge
  * the migration from the deprecated {@link ElasticsearchSinkFunction}.
  */
+@Internal
 class OldNewElasticsearchSinkFunctionBridge<T> implements org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction<T> {
 
 	private static final long serialVersionUID = 2415651895272659448L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewRequestIndexerBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewRequestIndexerBridge.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewRequestIndexerBridge.java
index f42fb44..d2e6900 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewRequestIndexerBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/OldNewRequestIndexerBridge.java
@@ -17,12 +17,15 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
+import org.apache.flink.annotation.Internal;
+
 import org.elasticsearch.action.ActionRequest;
 
 /**
  * A dummy {@link org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer} to bridge
  * the migration from the deprecated {@link RequestIndexer}.
  */
+@Internal
 class OldNewRequestIndexerBridge implements RequestIndexer {
 
 	private static final long serialVersionUID = 4213982619497149416L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
index ffb572d..7c4ba7a 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch5;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.util.ElasticsearchUtils;
@@ -45,6 +46,7 @@ import java.util.Map;
 /**
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 5.x.
  */
+@Internal
 public class Elasticsearch5ApiCallBridge implements ElasticsearchApiCallBridge {
 
 	private static final long serialVersionUID = -5222683870097809633L;

http://git-wip-us.apache.org/repos/asf/flink/blob/9b5fce6b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
index 0f1cc91..3307b2c 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch5;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
@@ -57,6 +58,7 @@ import java.util.Map;
  *
  * @param <T> Type of the elements handled by this sink
  */
+@PublicEvolving
 public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
 
 	private static final long serialVersionUID = 1L;


[06/19] flink git commit: [FLINK-6951] [kinesis] Shade httpcomponents dependency for Kinesis connector

Posted by tz...@apache.org.
[FLINK-6951] [kinesis] Shade httpcomponents dependency for Kinesis connector

This closes #4150.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/77e63e6a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/77e63e6a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/77e63e6a

Branch: refs/heads/master
Commit: 77e63e6a76937c81c2641a5c46a9a53c0b57b309
Parents: d53a722
Author: Bowen Li <bo...@gmail.com>
Authored: Wed Jun 21 15:55:19 2017 -0700
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 flink-connectors/flink-connector-kinesis/pom.xml | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/77e63e6a/flink-connectors/flink-connector-kinesis/pom.xml
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/pom.xml b/flink-connectors/flink-connector-kinesis/pom.xml
index f91228b..99629bf 100644
--- a/flink-connectors/flink-connector-kinesis/pom.xml
+++ b/flink-connectors/flink-connector-kinesis/pom.xml
@@ -154,6 +154,7 @@ under the License.
 								<includes>
 									<include>com.amazonaws:*</include>
 									<include>com.google.protobuf:*</include>
+									<include>org.apache.httpcomponents:*</include>
 								</includes>
 							</artifactSet>
 							<relocations combine.children="override">
@@ -166,6 +167,10 @@ under the License.
 									<pattern>com.amazonaws</pattern>
 									<shadedPattern>org.apache.flink.kinesis.shaded.com.amazonaws</shadedPattern>
 								</relocation>
+								<relocation>
+									<pattern>org.apache.http</pattern>
+									<shadedPattern>org.apache.flink.kinesis.shaded.org.apache.http</shadedPattern>
+								</relocation>
 							</relocations>
 						</configuration>
 					</execution>
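
One way to sanity-check the relocation after building the connector jar (a sketch, not part of this commit; it assumes the shaded flink-connector-kinesis jar is on the classpath, and HttpClient is just one representative httpcomponents class):

// Sketch: resolves only if the relocation configured above took effect.
public class ShadingCheck {

	public static void main(String[] args) throws ClassNotFoundException {
		Class<?> shaded = Class.forName(
				"org.apache.flink.kinesis.shaded.org.apache.http.client.HttpClient");
		System.out.println("Relocated class found: " + shaded.getName());
	}
}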


[15/19] flink git commit: [FLINK-8217] [kinesis] Properly annotate APIs of flink-connector-kinesis

Posted by tz...@apache.org.
[FLINK-8217] [kinesis] Properly annotate APIs of flink-connector-kinesis

This closes #5138.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/30734d55
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/30734d55
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/30734d55

Branch: refs/heads/master
Commit: 30734d55660bfe00c39138584f0e576e711ad791
Parents: ac0facc
Author: Bowen Li <bo...@gmail.com>
Authored: Thu Dec 7 00:02:16 2017 -0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../connectors/kinesis/FlinkKinesisConsumer.java         |  2 ++
 .../connectors/kinesis/FlinkKinesisProducer.java         |  4 +++-
 .../streaming/connectors/kinesis/KinesisPartitioner.java |  4 +++-
 .../connectors/kinesis/config/AWSConfigConstants.java    |  3 +++
 .../kinesis/config/ConsumerConfigConstants.java          |  2 ++
 .../connectors/kinesis/internals/KinesisDataFetcher.java |  2 ++
 .../connectors/kinesis/internals/ShardConsumer.java      |  2 ++
 .../connectors/kinesis/model/KinesisStreamShard.java     | 11 +++++++++--
 .../kinesis/model/KinesisStreamShardState.java           |  2 ++
 .../connectors/kinesis/model/SentinelSequenceNumber.java |  2 ++
 .../connectors/kinesis/model/SequenceNumber.java         |  3 +++
 .../connectors/kinesis/model/StreamShardHandle.java      |  9 ++++++---
 .../connectors/kinesis/model/StreamShardMetadata.java    |  3 +++
 .../connectors/kinesis/proxy/GetShardListResult.java     |  2 ++
 .../streaming/connectors/kinesis/proxy/KinesisProxy.java |  2 ++
 .../connectors/kinesis/proxy/KinesisProxyInterface.java  |  2 ++
 .../serialization/KinesisDeserializationSchema.java      |  2 ++
 .../KinesisDeserializationSchemaWrapper.java             |  2 ++
 .../serialization/KinesisSerializationSchema.java        |  3 +++
 .../flink/streaming/connectors/kinesis/util/AWSUtil.java |  2 ++
 .../connectors/kinesis/util/KinesisConfigUtil.java       |  2 ++
 21 files changed, 59 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
index f6a9bd1..06b70b6 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
@@ -69,6 +70,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * @param <T> the type of data emitted
  */
+@PublicEvolving
 public class FlinkKinesisConsumer<T> extends RichParallelSourceFunction<T> implements
 		ResultTypeQueryable<T>,
 		CheckpointedFunction {
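
For reference (not part of this commit), constructing the consumer typically looks like the following sketch; the region, credentials, and stream name are placeholders.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

import java.util.Properties;

public class KinesisConsumerExample {

	public static void main(String[] args) throws Exception {
		Properties consumerConfig = new Properties();
		consumerConfig.put(AWSConfigConstants.AWS_REGION, "us-east-1");            // placeholder region
		consumerConfig.put(AWSConfigConstants.AWS_ACCESS_KEY_ID, "access_key_id"); // placeholder credentials
		consumerConfig.put(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secret_key");
		// optional: start reading from the latest record instead of TRIM_HORIZON
		consumerConfig.put(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "LATEST");

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<String> kinesis = env.addSource(new FlinkKinesisConsumer<>(
				"kinesis_stream_name", new SimpleStringSchema(), consumerConfig));

		kinesis.print();
		env.execute("kinesis consumer example");
	}
}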

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
index 04cb78a..a9b48ae 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.configuration.Configuration;
@@ -51,6 +52,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * @param <OUT> Data type to produce into Kinesis Streams
  */
+@PublicEvolving
 public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> implements CheckpointedFunction {
 
 	private static final long serialVersionUID = 6447077318449477846L;
@@ -211,7 +213,7 @@ public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT> implements
 	}
 
 	@Override
-	public void invoke(OUT value) throws Exception {
+	public void invoke(OUT value, Context context) throws Exception {
 		if (this.producer == null) {
 			throw new RuntimeException("Kinesis producer has been closed");
 		}
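
The invoke(OUT, Context) change above follows the two-argument SinkFunction.invoke signature; setting up the producer itself typically looks like this sketch (not part of this commit; region, credentials, and stream name are placeholders).

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;

import java.util.Properties;

public class KinesisProducerExample {

	public static void attachSink(DataStream<String> stream) {
		Properties producerConfig = new Properties();
		producerConfig.put(AWSConfigConstants.AWS_REGION, "us-east-1");            // placeholder region
		producerConfig.put(AWSConfigConstants.AWS_ACCESS_KEY_ID, "access_key_id"); // placeholder credentials
		producerConfig.put(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secret_key");

		FlinkKinesisProducer<String> producer =
				new FlinkKinesisProducer<>(new SimpleStringSchema(), producerConfig);
		producer.setFailOnError(true);                    // surface put failures instead of only logging them
		producer.setDefaultStream("kinesis_stream_name"); // placeholder stream name
		producer.setDefaultPartition("0");

		stream.addSink(producer);
	}
}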

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
index 6af01c9..6082346 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kinesis;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 
 /**
@@ -24,6 +26,7 @@ import java.io.Serializable;
  *
  * @param <T> record type
  */
+@PublicEvolving
 public abstract class KinesisPartitioner<T> implements Serializable {
 
 	/**
@@ -49,6 +52,5 @@ public abstract class KinesisPartitioner<T> implements Serializable {
 	 * @param numberOfParallelSubtasks Total number of parallel instances
 	 */
 	public void initialize(int indexOfThisSubtask, int numberOfParallelSubtasks) {
-		//
 	}
 }
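
For reference (not part of this commit), a custom partitioner sketch, assuming this class's abstract getPartitionId(T) method; the key count is illustrative.

import org.apache.flink.streaming.connectors.kinesis.KinesisPartitioner;

// Sketch: spread records across a fixed number of partition keys based on
// the element's hash code.
public class HashPartitioner extends KinesisPartitioner<String> {

	private static final long serialVersionUID = 1L;

	private static final int NUM_PARTITION_KEYS = 16; // illustrative constant

	@Override
	public String getPartitionId(String element) {
		return String.valueOf(Math.abs(element.hashCode()) % NUM_PARTITION_KEYS);
	}
}

It would be registered on the producer with setCustomPartitioner(new HashPartitioner()), assuming that setter of FlinkKinesisProducer.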

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
index eb14fc0..f3ff52b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
@@ -17,11 +17,14 @@
 
 package org.apache.flink.streaming.connectors.kinesis.config;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import com.amazonaws.auth.AWSCredentialsProvider;
 
 /**
  * Configuration keys for AWS service usage.
  */
+@PublicEvolving
 public class AWSConfigConstants {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index 702ed27..38a0e3d 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.config;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
@@ -26,6 +27,7 @@ import com.amazonaws.services.kinesis.model.ShardIteratorType;
 /**
  * Optional consumer specific configuration keys and default values for {@link FlinkKinesisConsumer}.
  */
+@PublicEvolving
 public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index 8fee60d..83bc57b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
@@ -72,6 +73,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * and 2) last processed sequence numbers of each subscribed shard. Since operations on the second state will be performed
  * by multiple threads, these operations should only be done using the handler methods provided in this class.
  */
+@Internal
 public class KinesisDataFetcher<T> {
 
 	private static final Logger LOG = LoggerFactory.getLogger(KinesisDataFetcher.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index a18466c..0d730af 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.metrics.ShardMetricsReporter;
@@ -50,6 +51,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * Thread that does the actual data pulling from AWS Kinesis shards. Each thread is in charge of one Kinesis shard only.
  */
+@Internal
 public class ShardConsumer<T> implements Runnable {
 
 	private static final Logger LOG = LoggerFactory.getLogger(ShardConsumer.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
index 22bfbf5..9632f38 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
+
 import com.amazonaws.services.kinesis.model.Shard;
 
 import java.io.Serializable;
@@ -24,9 +26,14 @@ import java.io.Serializable;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
- * A legacy serializable representation of a AWS Kinesis Stream shard. It is basically a wrapper class around the information
- * provided along with {@link com.amazonaws.services.kinesis.model.Shard}.
+ * A legacy serializable representation of an AWS Kinesis Stream shard.
+ * It is basically a wrapper class around the information provided along
+ * with {@link com.amazonaws.services.kinesis.model.Shard}.
+ *
+ * @deprecated Will be removed in a future version in favor of {@link StreamShardHandle}.
  */
+@Deprecated
+@Internal
 public class KinesisStreamShard implements Serializable {
 
 	private static final long serialVersionUID = -6004217801761077536L;

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
index fbd2e47..53b0828 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.Preconditions;
 
 import com.amazonaws.services.kinesis.model.Shard;
@@ -24,6 +25,7 @@ import com.amazonaws.services.kinesis.model.Shard;
 /**
  * A wrapper class that bundles a {@link StreamShardHandle} with its last processed sequence number.
  */
+@Internal
 public class KinesisStreamShardState {
 
 	/** A handle object that wraps the actual {@link Shard} instance and stream name. */

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
index a5398e4..f148167 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
 
@@ -25,6 +26,7 @@ import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetche
  * The value is initially set by {@link FlinkKinesisConsumer} when {@link KinesisDataFetcher}s are created.
  * The KinesisDataFetchers will use this value to determine how to retrieve the starting shard iterator from AWS Kinesis.
  */
+@Internal
 public enum SentinelSequenceNumber {
 
 	/** Flag value for shard's sequence numbers to indicate that the

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
index 021f53f..9ee18b1 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
+
 import java.io.Serializable;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
@@ -26,6 +28,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * and also a subsequence number. If this {@link SequenceNumber} is referring to an aggregated Kinesis record, the
  * subsequence number will be a non-negative value representing the order of the sub-record within the aggregation.
  */
+@Internal
 public class SequenceNumber implements Serializable {
 
 	private static final long serialVersionUID = 876972197938972667L;
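
A short sketch (not part of this commit) of the aggregated/non-aggregated distinction described above, assuming the (String) and (String, long) constructors; the sequence number literal is a placeholder.

import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;

public class SequenceNumberExample {

	public static void main(String[] args) {
		// a plain Kinesis record is identified by its sequence number alone
		SequenceNumber plain = new SequenceNumber("49545115243490985018280067714973144582180062593244200961");

		// a sub-record of an aggregated record also carries a non-negative
		// subsequence number (here 5) recording its order within the aggregate
		SequenceNumber aggregated = new SequenceNumber("49545115243490985018280067714973144582180062593244200961", 5);

		System.out.println(plain.equals(aggregated)); // false: subsequence numbers differ
	}
}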

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
index 767c227..c38cf3a 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
@@ -17,15 +17,18 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
+
 import com.amazonaws.services.kinesis.model.Shard;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
- * A wrapper class around the information provided along with streamName and {@link com.amazonaws.services.kinesis.model.Shard},
- * with some extra utility methods to determine whether or not a shard is closed and whether or not the shard is
- * a result of parent shard splits or merges.
+ * A wrapper class around the information provided along with streamName and
+ * {@link com.amazonaws.services.kinesis.model.Shard}, with some extra utility methods to determine whether
+ * or not a shard is closed and whether or not the shard is a result of parent shard splits or merges.
  */
+@Internal
 public class StreamShardHandle {
 
 	private final String streamName;

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
index a158a8b..571a38b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kinesis.model;
 
+import org.apache.flink.annotation.Internal;
+
 import java.io.Serializable;
 import java.util.Objects;
 
@@ -26,6 +28,7 @@ import java.util.Objects;
  * is required to avoid being locked-in to a specific AWS SDK version in order to maintain the consumer's state
  * backwards compatibility.
  */
+@Internal
 public class StreamShardMetadata implements Serializable {
 
 	private static final long serialVersionUID = 5134869582298563604L;

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
index fcfb3ac..24438e8 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 
 import java.util.HashMap;
@@ -28,6 +29,7 @@ import java.util.Set;
 /**
  * Basic model class to bundle the shards retrieved from Kinesis on a {@link KinesisProxyInterface#getShardList(Map)} call.
  */
+@Internal
 public class GetShardListResult {
 
 	private final Map<String, LinkedList<StreamShardHandle>> streamsToRetrievedShardList = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 6eb8134..da81a65 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
@@ -60,6 +61,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * This implementation differs mainly in that it can issue operations against arbitrary Kinesis streams, which is
  * functionality the Flink Kinesis Connector needs, since the consumer may read from multiple Kinesis streams simultaneously.
  */
+@Internal
 public class KinesisProxy implements KinesisProxyInterface {
 
 	private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
index 0538151..30464f3 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 
 import com.amazonaws.services.kinesis.model.GetRecordsResult;
@@ -26,6 +27,7 @@ import java.util.Map;
 /**
  * Interface for a Kinesis proxy that operates on multiple Kinesis streams within the same AWS service region.
  */
+@Internal
 public interface KinesisProxyInterface {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
index c4be96b..5f40a0f 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 
@@ -30,6 +31,7 @@ import java.io.Serializable;
  *
  * @param <T> The type created by the deserialization schema.
  */
+@PublicEvolving
 public interface KinesisDeserializationSchema<T> extends Serializable, ResultTypeQueryable<T> {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
index e058736..ba2f1b5 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.serialization;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 
@@ -27,6 +28,7 @@ import java.io.IOException;
  *
  * @param <T> The type created by the deserialization schema.
  */
+@Internal
 public class KinesisDeserializationSchemaWrapper<T> implements KinesisDeserializationSchema<T> {
 	private static final long serialVersionUID = 9143148962928375886L;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
index 9be410a..5bcd0bf 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kinesis.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 import java.nio.ByteBuffer;
 
@@ -25,6 +27,7 @@ import java.nio.ByteBuffer;
  * on a record's contents.
  * @param <T> The type of the elements to be serialized.
  */
+@PublicEvolving
 public interface KinesisSerializationSchema<T> extends Serializable {
 	/**
 	 * Serialize the given element into a ByteBuffer.
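
For readers skimming the annotations: this interface lets each record choose
its serialized bytes and, optionally, its target stream. Below is a minimal
implementation sketch (SimpleStringKinesisSchema is a made-up class name;
it assumes the connector's usual contract that a null target stream defers
to the producer's configured default):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class SimpleStringKinesisSchema implements KinesisSerializationSchema<String> {

        private static final long serialVersionUID = 1L;

        @Override
        public ByteBuffer serialize(String element) {
            // Encode the record as UTF-8 bytes.
            return ByteBuffer.wrap(element.getBytes(StandardCharsets.UTF_8));
        }

        @Override
        public String getTargetStream(String element) {
            // null means: use the producer's configured default stream.
            return null;
        }
    }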

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
index c2dc5d3..15e6cce 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.util;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.runtime.util.EnvironmentInformation;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider;
@@ -40,6 +41,7 @@ import java.util.Properties;
 /**
  * Some utilities specific to Amazon Web Services.
  */
+@Internal
 public class AWSUtil {
 	/** Used for formatting Flink-specific user agent string when creating Kinesis client. */
 	private static final String USER_AGENT_FORMAT = "Apache Flink %s (%s) Kinesis Connector";

http://git-wip-us.apache.org/repos/asf/flink/blob/30734d55/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
index a6b0f04..9203136 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.util;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
@@ -38,6 +39,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * Utilities for Flink Kinesis connector configuration.
  */
+@Internal
 public class KinesisConfigUtil {
 
 	/** Maximum number of items to pack into a PutRecords request. **/


[10/19] flink git commit: [FLINK-8162] [kinesis] Move shard metric gauges registration to KinesisDataFetcher

Posted by tz...@apache.org.
[FLINK-8162] [kinesis] Move shard metric gauges registration to KinesisDataFetcher

This commit moves the registration of shard metric gauges into the
KinesisDataFetcher, instead of having it handled by the ShardConsumer.
Overall, this achieves a better separation of concerns.

This commit also consolidates all metrics-related constant strings into a
separate KinesisConsumerMetricConstants class, with comments noting that
the metric names must not be changed, in order to maintain backwards
compatibility for the consumer's shipped metrics.
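
For context, the pattern introduced here is to register a gauge once, backed
by a mutable reporter object, so the thread updating the value never touches
the metric system directly. A minimal sketch of the idea, assuming only
Flink's MetricGroup API (simplified names; the actual classes follow in the
diff below):

    import org.apache.flink.metrics.MetricGroup;

    // Mutable holder: written by the consumer thread, read by the gauge.
    final class ShardMetricsSketch {
        private volatile long millisBehindLatest = -1;
        long getMillisBehindLatest() { return millisBehindLatest; }
        void setMillisBehindLatest(long value) { millisBehindLatest = value; }
    }

    final class RegistrationSketch {
        // Registration side (the fetcher): attach the gauge to the shard's group.
        static ShardMetricsSketch registerShardMetrics(MetricGroup shardGroup) {
            ShardMetricsSketch metrics = new ShardMetricsSketch();
            shardGroup.gauge("millisBehindLatest", metrics::getMillisBehindLatest);
            return metrics;
        }
    }

The update side (the shard consumer) then only calls
setMillisBehindLatest(...) on the returned holder, as the ShardConsumer
changes below show.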


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/03841fde
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/03841fde
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/03841fde

Branch: refs/heads/master
Commit: 03841fdece53f0b2264c8a46ae860e7689cabb49
Parents: 4d0d7f9
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 14:11:51 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kinesis/internals/KinesisDataFetcher.java   | 38 +++++++++++++++----
 .../kinesis/internals/ShardConsumer.java        | 19 +++++-----
 .../metrics/KinesisConsumerMetricConstants.java | 37 ++++++++++++++++++
 .../kinesis/metrics/ShardMetricsReporter.java   | 40 ++++++++++++++++++++
 .../kinesis/internals/ShardConsumerTest.java    |  8 ++--
 5 files changed, 121 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/03841fde/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index e8a264c..8fee60d 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -21,6 +21,8 @@ import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.metrics.KinesisConsumerMetricConstants;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardMetricsReporter;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
@@ -92,6 +94,13 @@ public class KinesisDataFetcher<T> {
 	private final KinesisDeserializationSchema<T> deserializationSchema;
 
 	// ------------------------------------------------------------------------
+	//  Consumer metrics
+	// ------------------------------------------------------------------------
+
+	/** The metric group that all metrics should be registered to. */
+	private final MetricGroup consumerMetricGroup;
+
+	// ------------------------------------------------------------------------
 	//  Subtask-specific settings
 	// ------------------------------------------------------------------------
 
@@ -205,6 +214,9 @@ public class KinesisDataFetcher<T> {
 		this.deserializationSchema = checkNotNull(deserializationSchema);
 		this.kinesis = checkNotNull(kinesis);
 
+		this.consumerMetricGroup = runtimeContext.getMetricGroup()
+			.addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);
+
 		this.error = checkNotNull(error);
 		this.subscribedShardsState = checkNotNull(subscribedShardsState);
 		this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);
@@ -274,7 +286,7 @@ public class KinesisDataFetcher<T> {
 						seededStateIndex,
 						subscribedShardsState.get(seededStateIndex).getStreamShardHandle(),
 						subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum(),
-						registerMetricGroupForShard(subscribedShardsState.get(seededStateIndex))));
+						registerShardMetrics(consumerMetricGroup, subscribedShardsState.get(seededStateIndex))));
 			}
 		}
 
@@ -321,7 +333,7 @@ public class KinesisDataFetcher<T> {
 						newStateIndex,
 						newShardState.getStreamShardHandle(),
 						newShardState.getLastProcessedSequenceNum(),
-						registerMetricGroupForShard(newShardState)));
+						registerShardMetrics(consumerMetricGroup, newShardState)));
 			}
 
 			// we also check if we are running here so that we won't start the discovery sleep
@@ -547,13 +559,23 @@ public class KinesisDataFetcher<T> {
 
 	/**
 	 * Registers a metric group associated with the shard id of the provided {@link KinesisStreamShardState shardState}.
+	 *
+	 * @return a {@link ShardMetricsReporter} that can be used to update metric values
 	 */
-	private MetricGroup registerMetricGroupForShard(KinesisStreamShardState shardState) {
-		return runtimeContext
-			.getMetricGroup()
-			.addGroup("Kinesis")
-			.addGroup("stream", shardState.getStreamShardHandle().getStreamName())
-			.addGroup("shardId", shardState.getStreamShardHandle().getShard().getShardId());
+	private static ShardMetricsReporter registerShardMetrics(MetricGroup metricGroup, KinesisStreamShardState shardState) {
+		ShardMetricsReporter shardMetrics = new ShardMetricsReporter();
+
+		MetricGroup streamShardMetricGroup = metricGroup
+			.addGroup(
+				KinesisConsumerMetricConstants.STREAM_METRICS_GROUP,
+				shardState.getStreamShardHandle().getStreamName())
+			.addGroup(
+				KinesisConsumerMetricConstants.SHARD_METRICS_GROUP,
+				shardState.getStreamShardHandle().getShard().getShardId());
+
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE, shardMetrics::getMillisBehindLatest);
+
+		return shardMetrics;
 	}
 
 	// ------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/flink/blob/03841fde/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index 9f6a4cd..a18466c 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -17,9 +17,9 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardMetricsReporter;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
@@ -67,12 +67,12 @@ public class ShardConsumer<T> implements Runnable {
 	private final int maxNumberOfRecordsPerFetch;
 	private final long fetchIntervalMillis;
 
+	private final ShardMetricsReporter shardMetricsReporter;
+
 	private SequenceNumber lastSequenceNum;
 
 	private Date initTimestamp;
 
-	private long millisBehindLatest;
-
 	/**
 	 * Creates a shard consumer.
 	 *
@@ -80,19 +80,19 @@ public class ShardConsumer<T> implements Runnable {
 	 * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
 	 * @param subscribedShard the shard this consumer is subscribed to
 	 * @param lastSequenceNum the sequence number in the shard to start consuming
-	 * @param kinesisMetricGroup the metric group to report to
+	 * @param shardMetricsReporter the reporter to report metrics to
 	 */
 	public ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 						Integer subscribedShardStateIndex,
 						StreamShardHandle subscribedShard,
 						SequenceNumber lastSequenceNum,
-						MetricGroup kinesisMetricGroup) {
+						ShardMetricsReporter shardMetricsReporter) {
 		this(fetcherRef,
 			subscribedShardStateIndex,
 			subscribedShard,
 			lastSequenceNum,
 			KinesisProxy.create(fetcherRef.getConsumerConfiguration()),
-			kinesisMetricGroup);
+			shardMetricsReporter);
 	}
 
 	/** This constructor is exposed for testing purposes. */
@@ -101,14 +101,13 @@ public class ShardConsumer<T> implements Runnable {
 							StreamShardHandle subscribedShard,
 							SequenceNumber lastSequenceNum,
 							KinesisProxyInterface kinesis,
-							MetricGroup kinesisMetricGroup) {
+							ShardMetricsReporter shardMetricsReporter) {
 		this.fetcherRef = checkNotNull(fetcherRef);
 		this.subscribedShardStateIndex = checkNotNull(subscribedShardStateIndex);
 		this.subscribedShard = checkNotNull(subscribedShard);
 		this.lastSequenceNum = checkNotNull(lastSequenceNum);
 
-		checkNotNull(kinesisMetricGroup);
-		kinesisMetricGroup.gauge("millisBehindLatest", () -> millisBehindLatest);
+		this.shardMetricsReporter = checkNotNull(shardMetricsReporter);
 
 		checkArgument(
 			!lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get()),
@@ -304,7 +303,7 @@ public class ShardConsumer<T> implements Runnable {
 				getRecordsResult = kinesis.getRecords(shardItr, maxNumberOfRecords);
 
 				// Update millis behind latest so it gets reported by the millisBehindLatest gauge
-				millisBehindLatest = getRecordsResult.getMillisBehindLatest();
+				shardMetricsReporter.setMillisBehindLatest(getRecordsResult.getMillisBehindLatest());
 			} catch (ExpiredIteratorException eiEx) {
 				LOG.warn("Encountered an unexpected expired iterator {} for shard {};" +
 					" refreshing the iterator ...", shardItr, subscribedShard);

http://git-wip-us.apache.org/repos/asf/flink/blob/03841fde/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
new file mode 100644
index 0000000..1b83f16
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.metrics;
+
+import org.apache.flink.annotation.Internal;
+
+/**
+ * A collection of consumer metric related constant names.
+ *
+ * <p>The names must not be changed, as that would break backwards compatibility for the consumer metrics.
+ */
+@Internal
+public class KinesisConsumerMetricConstants {
+
+	public static final String KINESIS_CONSUMER_METRICS_GROUP = "KinesisConsumer";
+
+	public static final String STREAM_METRICS_GROUP = "stream";
+	public static final String SHARD_METRICS_GROUP = "shardId";
+
+	public static final String MILLIS_BEHIND_LATEST_GAUGE = "millisBehindLatest";
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/03841fde/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
new file mode 100644
index 0000000..2b6a491
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.metrics;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer;
+
+/**
+ * A container for {@link ShardConsumer}s to report metric values.
+ */
+@Internal
+public class ShardMetricsReporter {
+
+	private volatile long millisBehindLatest = -1;
+
+	public long getMillisBehindLatest() {
+		return millisBehindLatest;
+	}
+
+	public void setMillisBehindLatest(long millisBehindLatest) {
+		this.millisBehindLatest = millisBehindLatest;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/03841fde/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index 6900183..e8c5902 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -17,7 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardMetricsReporter;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
@@ -78,7 +78,8 @@ public class ShardConsumerTest {
 			0,
 			subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9), new UnregisteredMetricsGroup()).run();
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9),
+			new ShardMetricsReporter()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
 		assertTrue(subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum().equals(
@@ -119,7 +120,8 @@ public class ShardConsumerTest {
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
 			// Get a total of 1000 records with 9 getRecords() calls,
 			// and the 7th getRecords() call will encounter an unexpected expired shard iterator
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(1000, 9, 7), new UnregisteredMetricsGroup()).run();
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(1000, 9, 7),
+			new ShardMetricsReporter()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
 		assertTrue(subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum().equals(


[08/19] flink git commit: [FLINK-8162] [kinesis, metrics] Add Kinesis' millisBehindLatest metric

Posted by tz...@apache.org.
[FLINK-8162] [kinesis, metrics] Add Kinesis' millisBehindLatest metric
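
The value comes straight from each GetRecords response and is exposed through
a per-shard gauge. A condensed sketch of the registration pattern used in the
diff below (the volatile modifier is added here for cross-thread visibility
of the latest value; the diff itself uses a plain field):

    import org.apache.flink.metrics.MetricGroup;

    class ShardConsumerSketch {
        // Updated after every successful getRecords() call.
        private volatile long millisBehindLatest;

        ShardConsumerSketch(MetricGroup kinesisMetricGroup) {
            // The lambda re-reads the field on every report, so later
            // updates are picked up by the metrics reporter.
            kinesisMetricGroup.gauge("millisBehindLatest", () -> millisBehindLatest);
        }

        void onGetRecordsResult(long reportedMillisBehindLatest) {
            millisBehindLatest = reportedMillisBehindLatest;
        }
    }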


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4d0d7f92
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4d0d7f92
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4d0d7f92

Branch: refs/heads/master
Commit: 4d0d7f925abaa2d02814a52bfb666f445bd25ea7
Parents: 82a9ae5
Author: Cristian <me...@cristian.io>
Authored: Tue Dec 19 07:14:22 2017 -0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 docs/monitoring/metrics.md                      | 23 ++++++++++++++++++++
 .../kinesis/internals/KinesisDataFetcher.java   | 18 +++++++++++++--
 .../kinesis/internals/ShardConsumer.java        | 20 ++++++++++++++---
 .../kinesis/internals/ShardConsumerTest.java    |  5 +++--
 .../testutils/FakeKinesisBehavioursFactory.java |  2 ++
 .../testutils/TestableKinesisDataFetcher.java   |  3 +++
 6 files changed, 64 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/docs/monitoring/metrics.md
----------------------------------------------------------------------
diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index 5c4b85f..4e296ba 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1305,6 +1305,29 @@ Thus, in order to infer the metric identifier:
   </tbody>
 </table>
 
+#### Kinesis Connectors
+<table class="table table-bordered">
+  <thead>
+    <tr>
+      <th class="text-left" style="width: 18%">Scope</th>
+      <th class="text-left" style="width: 26%">Metrics</th>
+      <th class="text-left" style="width: 48%">Description</th>
+      <th class="text-left" style="width: 8%">Type</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>millisBehindLatest</td>
+      <td>The number of milliseconds the <a>GetRecords</a> response is from the head of the stream,
+      indicating how far behind current time the consumer is. A value of zero indicates record
+      processing is caught up, and there are no new records to process at this moment.
+      </td>
+      <td>Gauge</td>
+    </tr>
+  </tbody>
+</table>
+
 ## Latency tracking
 
 Flink allows tracking the latency of records traveling through the system. To enable the latency tracking
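
Since the shard groups added by this change hang off the operator's metric
group, the full metric identifier would, under Flink's default scope formats,
look roughly like the following (the leading placeholders depend on the
configured metrics.scope.operator pattern):

    <host>.taskmanager.<tm_id>.<job_name>.<operator_name>.<subtask_index>.Kinesis.stream.<stream_name>.shardId.<shard_id>.millisBehindLatest

Note that the follow-up commit in this series renames the top-level group
from "Kinesis" to "KinesisConsumer" and moves the registration into the
KinesisDataFetcher.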

http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index bbfbb20..e8a264c 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
 import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
@@ -272,7 +273,8 @@ public class KinesisDataFetcher<T> {
 						this,
 						seededStateIndex,
 						subscribedShardsState.get(seededStateIndex).getStreamShardHandle(),
-						subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum()));
+						subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum(),
+						registerMetricGroupForShard(subscribedShardsState.get(seededStateIndex))));
 			}
 		}
 
@@ -318,7 +320,8 @@ public class KinesisDataFetcher<T> {
 						this,
 						newStateIndex,
 						newShardState.getStreamShardHandle(),
-						newShardState.getLastProcessedSequenceNum()));
+						newShardState.getLastProcessedSequenceNum(),
+						registerMetricGroupForShard(newShardState)));
 			}
 
 			// we also check if we are running here so that we won't start the discovery sleep
@@ -542,6 +545,17 @@ public class KinesisDataFetcher<T> {
 		}
 	}
 
+	/**
+	 * Registers a metric group associated with the shard id of the provided {@link KinesisStreamShardState shardState}.
+	 */
+	private MetricGroup registerMetricGroupForShard(KinesisStreamShardState shardState) {
+		return runtimeContext
+			.getMetricGroup()
+			.addGroup("Kinesis")
+			.addGroup("stream", shardState.getStreamShardHandle().getStreamName())
+			.addGroup("shardId", shardState.getStreamShardHandle().getShard().getShardId());
+	}
+
 	// ------------------------------------------------------------------------
 	//  Miscellaneous utility functions
 	// ------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index 2d48e5f..9f6a4cd 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
+import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
@@ -70,6 +71,8 @@ public class ShardConsumer<T> implements Runnable {
 
 	private Date initTimestamp;
 
+	private long millisBehindLatest;
+
 	/**
 	 * Creates a shard consumer.
 	 *
@@ -77,16 +80,19 @@ public class ShardConsumer<T> implements Runnable {
 	 * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
 	 * @param subscribedShard the shard this consumer is subscribed to
 	 * @param lastSequenceNum the sequence number in the shard to start consuming
+	 * @param kinesisMetricGroup the metric group to report to
 	 */
 	public ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 						Integer subscribedShardStateIndex,
 						StreamShardHandle subscribedShard,
-						SequenceNumber lastSequenceNum) {
+						SequenceNumber lastSequenceNum,
+						MetricGroup kinesisMetricGroup) {
 		this(fetcherRef,
 			subscribedShardStateIndex,
 			subscribedShard,
 			lastSequenceNum,
-			KinesisProxy.create(fetcherRef.getConsumerConfiguration()));
+			KinesisProxy.create(fetcherRef.getConsumerConfiguration()),
+			kinesisMetricGroup);
 	}
 
 	/** This constructor is exposed for testing purposes. */
@@ -94,11 +100,16 @@ public class ShardConsumer<T> implements Runnable {
 							Integer subscribedShardStateIndex,
 							StreamShardHandle subscribedShard,
 							SequenceNumber lastSequenceNum,
-							KinesisProxyInterface kinesis) {
+							KinesisProxyInterface kinesis,
+							MetricGroup kinesisMetricGroup) {
 		this.fetcherRef = checkNotNull(fetcherRef);
 		this.subscribedShardStateIndex = checkNotNull(subscribedShardStateIndex);
 		this.subscribedShard = checkNotNull(subscribedShard);
 		this.lastSequenceNum = checkNotNull(lastSequenceNum);
+
+		checkNotNull(kinesisMetricGroup);
+		kinesisMetricGroup.gauge("millisBehindLatest", () -> millisBehindLatest);
+
 		checkArgument(
 			!lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get()),
 			"Should not start a ShardConsumer if the shard has already been completely read.");
@@ -291,6 +302,9 @@ public class ShardConsumer<T> implements Runnable {
 		while (getRecordsResult == null) {
 			try {
 				getRecordsResult = kinesis.getRecords(shardItr, maxNumberOfRecords);
+
+				// Update millis behind latest so it gets reported by the millisBehindLatest gauge
+				millisBehindLatest = getRecordsResult.getMillisBehindLatest();
 			} catch (ExpiredIteratorException eiEx) {
 				LOG.warn("Encountered an unexpected expired iterator {} for shard {};" +
 					" refreshing the iterator ...", shardItr, subscribedShard);

http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index a194835..6900183 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
+import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
@@ -77,7 +78,7 @@ public class ShardConsumerTest {
 			0,
 			subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9)).run();
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9), new UnregisteredMetricsGroup()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
 		assertTrue(subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum().equals(
@@ -118,7 +119,7 @@ public class ShardConsumerTest {
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
 			// Get a total of 1000 records with 9 getRecords() calls,
 			// and the 7th getRecords() call will encounter an unexpected expired shard iterator
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(1000, 9, 7)).run();
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(1000, 9, 7), new UnregisteredMetricsGroup()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
 		assertTrue(subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum().equals(

http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
index 2fda0d5..61a3a6b 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
@@ -116,6 +116,7 @@ public class FakeKinesisBehavioursFactory {
 				// assuming that the maxRecordsToGet is always large enough
 				return new GetRecordsResult()
 					.withRecords(shardItrToRecordBatch.get(shardIterator))
+					.withMillisBehindLatest(500L)
 					.withNextShardIterator(
 						(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
 							? null : String.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null
@@ -176,6 +177,7 @@ public class FakeKinesisBehavioursFactory {
 			// assuming that the maxRecordsToGet is always large enough
 			return new GetRecordsResult()
 				.withRecords(shardItrToRecordBatch.get(shardIterator))
+				.withMillisBehindLatest(500L)
 				.withNextShardIterator(
 					(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
 						? null : String.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null

http://git-wip-us.apache.org/repos/asf/flink/blob/4d0d7f92/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
index 5d76262..f63a30c 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
@@ -20,6 +20,7 @@ package org.apache.flink.streaming.connectors.kinesis.testutils;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.common.serialization.SimpleStringSchema;
 import org.apache.flink.core.testutils.OneShotLatch;
+import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
@@ -136,6 +137,8 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 			}
 		});
 
+		Mockito.when(mockedRuntimeContext.getMetricGroup()).thenReturn(new UnregisteredMetricsGroup());
+
 		return mockedRuntimeContext;
 	}
 }


[16/19] flink git commit: [FLINK-8296] [kafka] Rework FlinkKafkaConsumerBaseTest to not rely on Java reflection

Posted by tz...@apache.org.
[FLINK-8296] [kafka] Rework FlinkKafkaConsumerBaseTest to not rely on Java reflection

Reflection was mainly used to inject mocks into private fields of the
FlinkKafkaConsumerBase, so that tests did not need to fully execute all
operator life cycle methods. This, however, made the unit tests too
implementation-specific.

This commit reworks the FlinkKafkaConsumerBaseTest to remove the test
consumer instantiation methods that relied on reflection for dependency
injection. All tests now instantiate dummy test consumers normally, and
properly execute all operator life cycle methods regardless of the
logic under test.

This closes #5188.
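
The accompanying production change is deliberately small: instead of tests
reading a private field reflectively, the class exposes a narrow
package-private accessor marked @VisibleForTesting. A minimal sketch of that
pattern (ExampleSource and the map type are made up for illustration; the
real accessor is the getPendingOffsetsToCommit() method in the diff below):

    import org.apache.flink.annotation.VisibleForTesting;

    import java.util.HashMap;
    import java.util.Map;

    public abstract class ExampleSource {

        // Private state that tests previously reached via reflection.
        private final Map<Long, Object> pendingOffsetsToCommit = new HashMap<>();

        // Package-private on purpose: visible to tests in the same package,
        // but not part of the public API.
        @VisibleForTesting
        Map<Long, Object> getPendingOffsetsToCommit() {
            return pendingOffsetsToCommit;
        }
    }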


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/37cdaf97
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/37cdaf97
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/37cdaf97

Branch: refs/heads/master
Commit: 37cdaf976ff198a6e5c1d0e6e38a50de185cec1e
Parents: faaa135
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Tue Dec 19 16:10:44 2017 -0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kafka/FlinkKafkaConsumerBase.java           |   5 +
 .../kafka/FlinkKafkaConsumerBaseTest.java       | 532 ++++++++++++-------
 2 files changed, 338 insertions(+), 199 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/37cdaf97/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
index c350442..7a87f4d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
@@ -884,4 +884,9 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 	OffsetCommitMode getOffsetCommitMode() {
 		return offsetCommitMode;
 	}
+
+	@VisibleForTesting
+	LinkedMap getPendingOffsetsToCommit() {
+		return pendingOffsetsToCommit;
+	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/37cdaf97/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
index 180b12a..f8aeea2 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
@@ -18,17 +18,27 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.api.common.ExecutionConfig;
+import org.apache.flink.api.common.accumulators.Accumulator;
+import org.apache.flink.api.common.state.KeyedStateStore;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.ListStateDescriptor;
 import org.apache.flink.api.common.state.OperatorStateStore;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.runtime.state.StateInitializationContext;
+import org.apache.flink.core.testutils.CheckedThread;
+import org.apache.flink.core.testutils.OneShotLatch;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
+import org.apache.flink.runtime.memory.MemoryManager;
+import org.apache.flink.runtime.operators.testutils.MockEnvironment;
+import org.apache.flink.runtime.state.FunctionInitializationContext;
 import org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
 import org.apache.flink.streaming.api.operators.StreamSource;
 import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
 import org.apache.flink.streaming.connectors.kafka.config.OffsetCommitMode;
@@ -44,16 +54,13 @@ import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.Preconditions;
 import org.apache.flink.util.SerializedValue;
 
-import org.apache.commons.collections.map.LinkedMap;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 
 import java.io.Serializable;
-import java.lang.reflect.Field;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -86,20 +93,19 @@ public class FlinkKafkaConsumerBaseTest {
 	 * Tests that not both types of timestamp extractors / watermark generators can be used.
 	 */
 	@Test
+	@SuppressWarnings("unchecked")
 	public void testEitherWatermarkExtractor() {
 		try {
-			new DummyFlinkKafkaConsumer<>().assignTimestampsAndWatermarks((AssignerWithPeriodicWatermarks<Object>) null);
+			new DummyFlinkKafkaConsumer<String>().assignTimestampsAndWatermarks((AssignerWithPeriodicWatermarks<String>) null);
 			fail();
 		} catch (NullPointerException ignored) {}
 
 		try {
-			new DummyFlinkKafkaConsumer<>().assignTimestampsAndWatermarks((AssignerWithPunctuatedWatermarks<Object>) null);
+			new DummyFlinkKafkaConsumer<String>().assignTimestampsAndWatermarks((AssignerWithPunctuatedWatermarks<String>) null);
 			fail();
 		} catch (NullPointerException ignored) {}
 
-		@SuppressWarnings("unchecked")
 		final AssignerWithPeriodicWatermarks<String> periodicAssigner = mock(AssignerWithPeriodicWatermarks.class);
-		@SuppressWarnings("unchecked")
 		final AssignerWithPunctuatedWatermarks<String> punctuatedAssigner = mock(AssignerWithPunctuatedWatermarks.class);
 
 		DummyFlinkKafkaConsumer<String> c1 = new DummyFlinkKafkaConsumer<>();
@@ -123,17 +129,16 @@ public class FlinkKafkaConsumerBaseTest {
 	@Test
 	public void ignoreCheckpointWhenNotRunning() throws Exception {
 		@SuppressWarnings("unchecked")
-		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
+		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>();
 
-		FlinkKafkaConsumerBase<String> consumer = getConsumer(fetcher, new LinkedMap(), false);
-		OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
-		TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
-		when(operatorStateStore.getListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
+		final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
+		setupConsumer(consumer, false, listState, true, 0, 1);
 
+		// snapshot before the fetcher starts running
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
 
+		// no state should have been checkpointed
 		assertFalse(listState.get().iterator().hasNext());
-		consumer.notifyCheckpointComplete(66L);
 	}
 
 	/**
@@ -142,32 +147,13 @@ public class FlinkKafkaConsumerBaseTest {
 	 */
 	@Test
 	public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception {
-		OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
-
-		TestingListState<Serializable> restoredListState = new TestingListState<>();
-		restoredListState.add(Tuple2.of(new KafkaTopicPartition("abc", 13), 16768L));
-		restoredListState.add(Tuple2.of(new KafkaTopicPartition("def", 7), 987654321L));
-
-		FlinkKafkaConsumerBase<String> consumer = getConsumer(null, new LinkedMap(), true);
-		StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
-		when(context.getNumberOfParallelSubtasks()).thenReturn(1);
-		when(context.getIndexOfThisSubtask()).thenReturn(0);
-		consumer.setRuntimeContext(context);
-
-		// mock old 1.2 state (empty)
-		when(operatorStateStore.getSerializableListState(Matchers.any(String.class))).thenReturn(new TestingListState<Serializable>());
-		// mock 1.3 state
-		when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(restoredListState);
-
-		StateInitializationContext initializationContext = mock(StateInitializationContext.class);
-
-		when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
-		when(initializationContext.isRestored()).thenReturn(true);
-
-		consumer.initializeState(initializationContext);
+		@SuppressWarnings("unchecked")
+		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>();
 
-		consumer.open(new Configuration());
+		final TestingListState<Tuple2<KafkaTopicPartition, Long>> restoredListState = new TestingListState<>();
+		setupConsumer(consumer, true, restoredListState, true, 0, 1);
 
+		// snapshot before the fetcher starts running
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(17, 17));
 
 		// ensure that the list was cleared and refilled. while this is an implementation detail, we use it here
@@ -192,67 +178,68 @@ public class FlinkKafkaConsumerBaseTest {
 
 	@Test
 	public void testConfigureOnCheckpointsCommitMode() throws Exception {
+		@SuppressWarnings("unchecked")
+		// auto-commit enabled; this should be ignored in this case
+		final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(true);
 
-		DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer();
-		consumer.setIsAutoCommitEnabled(true); // this should be ignored
-
-		StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
-		when(context.getIndexOfThisSubtask()).thenReturn(0);
-		when(context.getNumberOfParallelSubtasks()).thenReturn(1);
-		when(context.isCheckpointingEnabled()).thenReturn(true); // enable checkpointing, auto commit should be ignored
-		consumer.setRuntimeContext(context);
+		setupConsumer(
+			consumer,
+			false,
+			null,
+			true, // enable checkpointing; auto commit should be ignored
+			0,
+			1);
 
-		consumer.open(new Configuration());
 		assertEquals(OffsetCommitMode.ON_CHECKPOINTS, consumer.getOffsetCommitMode());
 	}
 
 	@Test
 	public void testConfigureAutoCommitMode() throws Exception {
+		@SuppressWarnings("unchecked")
+		final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(true);
 
-		DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer();
-		consumer.setIsAutoCommitEnabled(true);
-
-		StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
-		when(context.getIndexOfThisSubtask()).thenReturn(0);
-		when(context.getNumberOfParallelSubtasks()).thenReturn(1);
-		when(context.isCheckpointingEnabled()).thenReturn(false); // disable checkpointing, auto commit should be respected
-		consumer.setRuntimeContext(context);
+		setupConsumer(
+			consumer,
+			false,
+			null,
+			false, // disable checkpointing; auto commit should be respected
+			0,
+			1);
 
-		consumer.open(new Configuration());
 		assertEquals(OffsetCommitMode.KAFKA_PERIODIC, consumer.getOffsetCommitMode());
 	}
 
 	@Test
 	public void testConfigureDisableOffsetCommitWithCheckpointing() throws Exception {
-
-		DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer();
-		consumer.setIsAutoCommitEnabled(true); // this should be ignored
-
-		StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
-		when(context.getIndexOfThisSubtask()).thenReturn(0);
-		when(context.getNumberOfParallelSubtasks()).thenReturn(1);
-		when(context.isCheckpointingEnabled()).thenReturn(true); // enable checkpointing, auto commit should be ignored
-		consumer.setRuntimeContext(context);
-
+		@SuppressWarnings("unchecked")
+		// auto-commit enabled; this should be ignored in this case
+		final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(true);
 		consumer.setCommitOffsetsOnCheckpoints(false); // disabling offset committing should override everything
 
-		consumer.open(new Configuration());
+		setupConsumer(
+			consumer,
+			false,
+			null,
+			true, // enable checkpointing; auto commit should be ignored
+			0,
+			1);
+
 		assertEquals(OffsetCommitMode.DISABLED, consumer.getOffsetCommitMode());
 	}
 
 	@Test
 	public void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Exception {
+		@SuppressWarnings("unchecked")
+		final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(false);
 
-		DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer();
-		consumer.setIsAutoCommitEnabled(false);
-
-		StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
-		when(context.getIndexOfThisSubtask()).thenReturn(0);
-		when(context.getNumberOfParallelSubtasks()).thenReturn(1);
-		when(context.isCheckpointingEnabled()).thenReturn(false); // disable checkpointing, auto commit should be respected
-		consumer.setRuntimeContext(context);
+		setupConsumer(
+			consumer,
+			false,
+			null,
+			false, // disable checkpointing; auto commit should be respected
+			0,
+			1);
 
-		consumer.open(new Configuration());
 		assertEquals(OffsetCommitMode.DISABLED, consumer.getOffsetCommitMode());
 	}
 
@@ -278,36 +265,37 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// --------------------------------------------------------------------
 
-		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
+		final OneShotLatch runLatch = new OneShotLatch();
+		final OneShotLatch stopLatch = new OneShotLatch();
+		final AbstractFetcher<String, ?> fetcher = getRunnableMockFetcher(runLatch, stopLatch);
 		when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);
 
-		final LinkedMap pendingOffsetsToCommit = new LinkedMap();
-
-		FlinkKafkaConsumerBase<String> consumer = getConsumer(fetcher, pendingOffsetsToCommit, true);
-		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
-		when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true); // enable checkpointing
-		when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
-		when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);
-		consumer.setRuntimeContext(mockRuntimeContext);
-
-		assertEquals(0, pendingOffsetsToCommit.size());
-
-		OperatorStateStore backend = mock(OperatorStateStore.class);
+		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
+				fetcher,
+				mock(AbstractPartitionDiscoverer.class),
+				false);
 
-		TestingListState<Serializable> listState = new TestingListState<>();
-		// mock old 1.2 state (empty)
-		when(backend.getSerializableListState(Matchers.any(String.class))).thenReturn(new TestingListState<Serializable>());
-		// mock 1.3 state
-		when(backend.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
+		final TestingListState<Serializable> listState = new TestingListState<>();
 
-		StateInitializationContext initializationContext = mock(StateInitializationContext.class);
+		// set up and run the consumer; wait until the consumer reaches the main fetch loop before continuing the test
+		setupConsumer(consumer, false, listState, true, 0, 1);
 
-		when(initializationContext.getOperatorStateStore()).thenReturn(backend);
-		when(initializationContext.isRestored()).thenReturn(false, true, true, true);
+		final CheckedThread runThread = new CheckedThread() {
+			@Override
+			public void go() throws Exception {
+				consumer.run(mock(SourceFunction.SourceContext.class));
+			}
 
-		consumer.initializeState(initializationContext);
+			@Override
+			public void sync() throws Exception {
+				stopLatch.trigger();
+				super.sync();
+			}
+		};
+		runThread.start();
+		runLatch.await();
 
-		consumer.open(new Configuration());
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
 		// checkpoint 1
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138));
@@ -320,8 +308,8 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state1, snapshot1);
-		assertEquals(1, pendingOffsetsToCommit.size());
-		assertEquals(state1, pendingOffsetsToCommit.get(138L));
+		assertEquals(1, consumer.getPendingOffsetsToCommit().size());
+		assertEquals(state1, consumer.getPendingOffsetsToCommit().get(138L));
 
 		// checkpoint 2
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140));
@@ -334,13 +322,13 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state2, snapshot2);
-		assertEquals(2, pendingOffsetsToCommit.size());
-		assertEquals(state2, pendingOffsetsToCommit.get(140L));
+		assertEquals(2, consumer.getPendingOffsetsToCommit().size());
+		assertEquals(state2, consumer.getPendingOffsetsToCommit().get(140L));
 
 		// ack checkpoint 1
 		consumer.notifyCheckpointComplete(138L);
-		assertEquals(1, pendingOffsetsToCommit.size());
-		assertTrue(pendingOffsetsToCommit.containsKey(140L));
+		assertEquals(1, consumer.getPendingOffsetsToCommit().size());
+		assertTrue(consumer.getPendingOffsetsToCommit().containsKey(140L));
 
 		// checkpoint 3
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));
@@ -353,37 +341,35 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state3, snapshot3);
-		assertEquals(2, pendingOffsetsToCommit.size());
-		assertEquals(state3, pendingOffsetsToCommit.get(141L));
+		assertEquals(2, consumer.getPendingOffsetsToCommit().size());
+		assertEquals(state3, consumer.getPendingOffsetsToCommit().get(141L));
 
 		// ack checkpoint 3, subsumes number 2
 		consumer.notifyCheckpointComplete(141L);
-		assertEquals(0, pendingOffsetsToCommit.size());
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
-		assertEquals(0, pendingOffsetsToCommit.size());
-
-		OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
-		listState = new TestingListState<>();
-		when(operatorStateStore.getListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
 		// create 500 snapshots
 		for (int i = 100; i < 600; i++) {
 			consumer.snapshotState(new StateSnapshotContextSynchronousImpl(i, i));
 			listState.clear();
 		}
-		assertEquals(FlinkKafkaConsumerBase.MAX_NUM_PENDING_CHECKPOINTS, pendingOffsetsToCommit.size());
+		assertEquals(FlinkKafkaConsumerBase.MAX_NUM_PENDING_CHECKPOINTS, consumer.getPendingOffsetsToCommit().size());
 
 		// commit only the second last
 		consumer.notifyCheckpointComplete(598);
-		assertEquals(1, pendingOffsetsToCommit.size());
+		assertEquals(1, consumer.getPendingOffsetsToCommit().size());
 
 		// access invalid checkpoint
 		consumer.notifyCheckpointComplete(590);
 
 		// and the last
 		consumer.notifyCheckpointComplete(599);
-		assertEquals(0, pendingOffsetsToCommit.size());
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
+
+		runThread.sync();
 	}
 
 	@Test
@@ -407,38 +393,38 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// --------------------------------------------------------------------
 
-		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
+		final OneShotLatch runLatch = new OneShotLatch();
+		final OneShotLatch stopLatch = new OneShotLatch();
+		final AbstractFetcher<String, ?> fetcher = getRunnableMockFetcher(runLatch, stopLatch);
 		when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);
 
-		final LinkedMap pendingOffsetsToCommit = new LinkedMap();
-
-		FlinkKafkaConsumerBase<String> consumer = getConsumer(fetcher, pendingOffsetsToCommit, true);
-		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
-		when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true); // enable checkpointing
-		when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
-		when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);
-		consumer.setRuntimeContext(mockRuntimeContext);
-
+		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
+				fetcher,
+				mock(AbstractPartitionDiscoverer.class),
+				false);
 		consumer.setCommitOffsetsOnCheckpoints(false); // disable offset committing
 
-		assertEquals(0, pendingOffsetsToCommit.size());
-
-		OperatorStateStore backend = mock(OperatorStateStore.class);
+		final TestingListState<Serializable> listState = new TestingListState<>();
 
-		TestingListState<Serializable> listState = new TestingListState<>();
-		// mock old 1.2 state (empty)
-		when(backend.getSerializableListState(Matchers.any(String.class))).thenReturn(new TestingListState<Serializable>());
-		// mock 1.3 state
-		when(backend.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
+		// set up and run the consumer; wait until the consumer reaches the main fetch loop before continuing the test
+		setupConsumer(consumer, false, listState, true, 0, 1);
 
-		StateInitializationContext initializationContext = mock(StateInitializationContext.class);
-
-		when(initializationContext.getOperatorStateStore()).thenReturn(backend);
-		when(initializationContext.isRestored()).thenReturn(false, true, true, true);
+		final CheckedThread runThread = new CheckedThread() {
+			@Override
+			public void go() throws Exception {
+				consumer.run(mock(SourceFunction.SourceContext.class));
+			}
 
-		consumer.initializeState(initializationContext);
+			@Override
+			public void sync() throws Exception {
+				stopLatch.trigger();
+				super.sync();
+			}
+		};
+		runThread.start();
+		runLatch.await();
 
-		consumer.open(new Configuration());
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
 		// checkpoint 1
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138));
@@ -451,7 +437,7 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state1, snapshot1);
-		assertEquals(0, pendingOffsetsToCommit.size()); // pending offsets to commit should not be updated
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size()); // pending offsets to commit should not be updated
 
 		// checkpoint 2
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140));
@@ -464,7 +450,7 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state2, snapshot2);
-		assertEquals(0, pendingOffsetsToCommit.size()); // pending offsets to commit should not be updated
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size()); // pending offsets to commit should not be updated
 
 		// ack checkpoint 1
 		consumer.notifyCheckpointComplete(138L);
@@ -481,7 +467,7 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 
 		assertEquals(state3, snapshot3);
-		assertEquals(0, pendingOffsetsToCommit.size()); // pending offsets to commit should not be updated
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size()); // pending offsets to commit should not be updated
 
 		// ack checkpoint 3, subsumes number 2
 		consumer.notifyCheckpointComplete(141L);
@@ -490,16 +476,12 @@ public class FlinkKafkaConsumerBaseTest {
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
 		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
 
-		OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
-		listState = new TestingListState<>();
-		when(operatorStateStore.getListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
-
 		// create 500 snapshots
 		for (int i = 100; i < 600; i++) {
 			consumer.snapshotState(new StateSnapshotContextSynchronousImpl(i, i));
 			listState.clear();
 		}
-		assertEquals(0, pendingOffsetsToCommit.size()); // pending offsets to commit should not be updated
+		assertEquals(0, consumer.getPendingOffsetsToCommit().size()); // pending offsets to commit should not be updated
 
 		// commit only the second last
 		consumer.notifyCheckpointComplete(598);
@@ -532,7 +514,7 @@ public class FlinkKafkaConsumerBaseTest {
 	 * of topics fetched from Kafka.
 	 */
 	@SuppressWarnings("unchecked")
-	void testRescaling(
+	private void testRescaling(
 		final int initialParallelism,
 		final int numPartitions,
 		final int restoredParallelism,
@@ -554,8 +536,14 @@ public class FlinkKafkaConsumerBaseTest {
 			new AbstractStreamOperatorTestHarness[initialParallelism];
 
 		for (int i = 0; i < initialParallelism; i++) {
-			consumers[i] = new DummyFlinkKafkaConsumer<>(
-				Collections.singletonList("test-topic"), mockFetchedPartitionsOnStartup);
+			TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
+				new KafkaTopicsDescriptor(Collections.singletonList("test-topic"), null),
+				i,
+				initialParallelism,
+				TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList("test-topic")),
+				TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockFetchedPartitionsOnStartup));
+
+			consumers[i] = new DummyFlinkKafkaConsumer<>(mock(AbstractFetcher.class), partitionDiscoverer, false);
 			testHarnesses[i] = createTestHarness(consumers[i], initialParallelism, i);
 
 			// initializeState() is always called, null signals that we didn't restore
@@ -602,8 +590,14 @@ public class FlinkKafkaConsumerBaseTest {
 			new AbstractStreamOperatorTestHarness[restoredParallelism];
 
 		for (int i = 0; i < restoredParallelism; i++) {
-			restoredConsumers[i] = new DummyFlinkKafkaConsumer<>(
-				Collections.singletonList("test-topic"), mockFetchedPartitionsAfterRestore);
+			TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
+				new KafkaTopicsDescriptor(Collections.singletonList("test-topic"), null),
+				i,
+				restoredParallelism,
+				TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList("test-topic")),
+				TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockFetchedPartitionsAfterRestore));
+
+			restoredConsumers[i] = new DummyFlinkKafkaConsumer<>(mock(AbstractFetcher.class), partitionDiscoverer, false);
 			restoredTestHarnesses[i] = createTestHarness(restoredConsumers[i], restoredParallelism, i);
 
 			// initializeState() is always called, null signals that we didn't restore
@@ -630,28 +624,6 @@ public class FlinkKafkaConsumerBaseTest {
 
 	// ------------------------------------------------------------------------
 
-	private static <T> FlinkKafkaConsumerBase<T> getConsumer(
-			AbstractFetcher<T, ?> fetcher, LinkedMap pendingOffsetsToCommit, boolean running) throws Exception {
-		FlinkKafkaConsumerBase<T> consumer = new DummyFlinkKafkaConsumer<>();
-		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
-		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
-		consumer.setRuntimeContext(mockRuntimeContext);
-
-		Field fetcherField = FlinkKafkaConsumerBase.class.getDeclaredField("kafkaFetcher");
-		fetcherField.setAccessible(true);
-		fetcherField.set(consumer, fetcher);
-
-		Field mapField = FlinkKafkaConsumerBase.class.getDeclaredField("pendingOffsetsToCommit");
-		mapField.setAccessible(true);
-		mapField.set(consumer, pendingOffsetsToCommit);
-
-		Field runningField = FlinkKafkaConsumerBase.class.getDeclaredField("running");
-		runningField.setAccessible(true);
-		runningField.set(consumer, running);
-
-		return consumer;
-	}
-
 	private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
 		SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {
 
@@ -667,25 +639,43 @@ public class FlinkKafkaConsumerBaseTest {
 
 	// ------------------------------------------------------------------------
 
+	/**
+	 * An instantiable dummy {@link FlinkKafkaConsumerBase} that supports injecting
+	 * mocks for {@link FlinkKafkaConsumerBase#kafkaFetcher}, {@link FlinkKafkaConsumerBase#partitionDiscoverer},
+	 * and {@link FlinkKafkaConsumerBase#getIsAutoCommitEnabled()}.
+	 */
 	private static class DummyFlinkKafkaConsumer<T> extends FlinkKafkaConsumerBase<T> {
 		private static final long serialVersionUID = 1L;
 
-		boolean isAutoCommitEnabled = false;
+		private AbstractFetcher<T, ?> testFetcher;
+		private AbstractPartitionDiscoverer testPartitionDiscoverer;
+		private boolean isAutoCommitEnabled;
 
-		private List<String> fixedMockGetAllTopicsReturnSequence;
-		private List<KafkaTopicPartition> fixedMockGetAllPartitionsForTopicsReturnSequence;
+		@SuppressWarnings("unchecked")
+		DummyFlinkKafkaConsumer() {
+			this(false);
+		}
 
-		public DummyFlinkKafkaConsumer() {
-			this(Collections.singletonList("dummy-topic"), Collections.singletonList(new KafkaTopicPartition("dummy-topic", 0)));
+		@SuppressWarnings("unchecked")
+		DummyFlinkKafkaConsumer(boolean isAutoCommitEnabled) {
+			this(mock(AbstractFetcher.class), mock(AbstractPartitionDiscoverer.class), isAutoCommitEnabled);
 		}
 
 		@SuppressWarnings("unchecked")
-		public DummyFlinkKafkaConsumer(
-				List<String> fixedMockGetAllTopicsReturnSequence,
-				List<KafkaTopicPartition> fixedMockGetAllPartitionsForTopicsReturnSequence) {
-			super(Arrays.asList("dummy-topic"), null, (KeyedDeserializationSchema < T >) mock(KeyedDeserializationSchema.class), 0);
-			this.fixedMockGetAllTopicsReturnSequence = Preconditions.checkNotNull(fixedMockGetAllTopicsReturnSequence);
-			this.fixedMockGetAllPartitionsForTopicsReturnSequence = Preconditions.checkNotNull(fixedMockGetAllPartitionsForTopicsReturnSequence);
+		DummyFlinkKafkaConsumer(
+				AbstractFetcher<T, ?> testFetcher,
+				AbstractPartitionDiscoverer testPartitionDiscoverer,
+				boolean isAutoCommitEnabled) {
+
+			super(
+					Collections.singletonList("dummy-topic"),
+					null,
+					(KeyedDeserializationSchema<T>) mock(KeyedDeserializationSchema.class),
+					PARTITION_DISCOVERY_DISABLED);
+
+			this.testFetcher = testFetcher;
+			this.testPartitionDiscoverer = testPartitionDiscoverer;
+			this.isAutoCommitEnabled = isAutoCommitEnabled;
 		}
 
 		@Override
@@ -697,7 +687,7 @@ public class FlinkKafkaConsumerBaseTest {
 				SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
 				StreamingRuntimeContext runtimeContext,
 				OffsetCommitMode offsetCommitMode) throws Exception {
-			return mock(AbstractFetcher.class);
+			return this.testFetcher;
 		}
 
 		@Override
@@ -705,21 +695,12 @@ public class FlinkKafkaConsumerBaseTest {
 				KafkaTopicsDescriptor topicsDescriptor,
 				int indexOfThisSubtask,
 				int numParallelSubtasks) {
-			return new TestPartitionDiscoverer(
-				topicsDescriptor,
-				indexOfThisSubtask,
-				numParallelSubtasks,
-				TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(fixedMockGetAllTopicsReturnSequence),
-				TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(fixedMockGetAllPartitionsForTopicsReturnSequence));
+			return this.testPartitionDiscoverer;
 		}
 
 		@Override
 		protected boolean getIsAutoCommitEnabled() {
-			return isAutoCommitEnabled;
-		}
-
-		public void setIsAutoCommitEnabled(boolean isAutoCommitEnabled) {
-			this.isAutoCommitEnabled = isAutoCommitEnabled;
+			return this.isAutoCommitEnabled;
 		}
 	}
 
@@ -748,7 +729,7 @@ public class FlinkKafkaConsumerBaseTest {
 			return list;
 		}
 
-		public boolean isClearCalled() {
+		boolean isClearCalled() {
 			return clearCalled;
 		}
 
@@ -761,4 +742,157 @@ public class FlinkKafkaConsumerBaseTest {
 			}
 		}
 	}
+
+	/**
+	 * Returns a mock {@link AbstractFetcher}, with run / stop latches injected in
+	 * the {@link AbstractFetcher#runFetchLoop()} method.
+	 */
+	private static <T> AbstractFetcher<T, ?> getRunnableMockFetcher(
+			OneShotLatch runLatch,
+			OneShotLatch stopLatch) throws Exception {
+
+		@SuppressWarnings("unchecked")
+		final AbstractFetcher<T, ?> fetcher = mock(AbstractFetcher.class);
+
+		Mockito.doAnswer(invocationOnMock -> {
+			runLatch.trigger();
+			stopLatch.await();
+			return null;
+		}).when(fetcher).runFetchLoop();
+
+		return fetcher;
+	}
+
+	@SuppressWarnings("unchecked")
+	private static <T, S> void setupConsumer(
+			FlinkKafkaConsumerBase<T> consumer,
+			boolean isRestored,
+			ListState<S> restoredListState,
+			boolean isCheckpointingEnabled,
+			int subtaskIndex,
+			int totalNumSubtasks) throws Exception {
+
+		// run setup procedure in operator life cycle
+		consumer.setRuntimeContext(new MockRuntimeContext(isCheckpointingEnabled, totalNumSubtasks, subtaskIndex));
+		consumer.initializeState(new MockFunctionInitializationContext(isRestored, new MockOperatorStateStore(restoredListState)));
+		consumer.open(new Configuration());
+	}
+
+	private static class MockRuntimeContext extends StreamingRuntimeContext {
+
+		private final boolean isCheckpointingEnabled;
+
+		private final int numParallelSubtasks;
+		private final int subtaskIndex;
+
+		private MockRuntimeContext(
+				boolean isCheckpointingEnabled,
+				int numParallelSubtasks,
+				int subtaskIndex) {
+
+			super(
+				new MockStreamOperator(),
+				new MockEnvironment("mockTask", 4 * MemoryManager.DEFAULT_PAGE_SIZE, null, 16),
+				Collections.<String, Accumulator<?, ?>>emptyMap());
+
+			this.isCheckpointingEnabled = isCheckpointingEnabled;
+			this.numParallelSubtasks = numParallelSubtasks;
+			this.subtaskIndex = subtaskIndex;
+		}
+
+		@Override
+		public MetricGroup getMetricGroup() {
+			return new UnregisteredMetricsGroup();
+		}
+
+		@Override
+		public boolean isCheckpointingEnabled() {
+			return isCheckpointingEnabled;
+		}
+
+		@Override
+		public int getIndexOfThisSubtask() {
+			return subtaskIndex;
+		}
+
+		@Override
+		public int getNumberOfParallelSubtasks() {
+			return numParallelSubtasks;
+		}
+
+		// ------------------------------------------------------------------------
+
+		private static class MockStreamOperator extends AbstractStreamOperator<Integer> {
+			private static final long serialVersionUID = -1153976702711944427L;
+
+			@Override
+			public ExecutionConfig getExecutionConfig() {
+				return new ExecutionConfig();
+			}
+		}
+	}
+
+	private static class MockOperatorStateStore implements OperatorStateStore {
+
+		private final ListState<?> mockRestoredUnionListState;
+
+		private MockOperatorStateStore(ListState<?> restoredUnionListState) {
+			this.mockRestoredUnionListState = restoredUnionListState;
+		}
+
+		@Override
+		@SuppressWarnings("unchecked")
+		public <S> ListState<S> getUnionListState(ListStateDescriptor<S> stateDescriptor) throws Exception {
+			return (ListState<S>) mockRestoredUnionListState;
+		}
+
+		@Override
+		public <T extends Serializable> ListState<T> getSerializableListState(String stateName) throws Exception {
+			// return empty state for the legacy 1.2 Kafka consumer state
+			return new TestingListState<>();
+		}
+
+		// ------------------------------------------------------------------------
+
+		@Override
+		public <S> ListState<S> getOperatorState(ListStateDescriptor<S> stateDescriptor) throws Exception {
+			throw new UnsupportedOperationException();
+		}
+
+		@Override
+		public <S> ListState<S> getListState(ListStateDescriptor<S> stateDescriptor) throws Exception {
+			throw new UnsupportedOperationException();
+		}
+
+		@Override
+		public Set<String> getRegisteredStateNames() {
+			throw new UnsupportedOperationException();
+		}
+	}
+
+	private static class MockFunctionInitializationContext implements FunctionInitializationContext {
+
+		private final boolean isRestored;
+		private final OperatorStateStore operatorStateStore;
+
+		private MockFunctionInitializationContext(boolean isRestored, OperatorStateStore operatorStateStore) {
+			this.isRestored = isRestored;
+			this.operatorStateStore = operatorStateStore;
+		}
+
+		@Override
+		public boolean isRestored() {
+			return isRestored;
+		}
+
+		@Override
+		public OperatorStateStore getOperatorStateStore() {
+			return operatorStateStore;
+		}
+
+		@Override
+		public KeyedStateStore getKeyedStateStore() {
+			throw new UnsupportedOperationException();
+		}
+	}
 }
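
For readers skimming the refactored test: the net effect of the changes above is that each test now drives the consumer through its real life cycle with a single helper instead of ad-hoc Mockito stubs. A minimal sketch of the resulting test shape, using only names introduced in the diff (values are illustrative):

	// consumer with auto commit disabled
	final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(false);

	// one call now covers setRuntimeContext(), initializeState(), and open()
	setupConsumer(
		consumer,
		false,                     // isRestored
		new TestingListState<>(),  // restoredListState
		true,                      // isCheckpointingEnabled
		0,                         // subtaskIndex
		1);                        // totalNumSubtasks

	assertEquals(OffsetCommitMode.ON_CHECKPOINTS, consumer.getOffsetCommitMode());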


[04/19] flink git commit: [hotfix] Fix typo in AbstractMetricGroup.java

Posted by tz...@apache.org.
[hotfix] Fix typo in AbstractMetricGroup.java

This closes #5280.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/e304600d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/e304600d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/e304600d

Branch: refs/heads/master
Commit: e304600d08aa4f843d2ff68cd43162447c317969
Parents: 4496248
Author: maqingxiang-it <ma...@dev05v.sys.corp.qihoo.net>
Authored: Thu Jan 11 13:44:04 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../apache/flink/runtime/metrics/groups/AbstractMetricGroup.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/e304600d/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java b/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
index e6df3a4..6d9c7d9 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
@@ -360,7 +360,7 @@ public abstract class AbstractMetricGroup<A extends AbstractMetricGroup<?>> impl
 		// add the metric only if the group is still open
 		synchronized (this) {
 			if (!closed) {
-				// immediately put without a 'contains' check to optimize the common case (no collition)
+				// immediately put without a 'contains' check to optimize the common case (no collision)
 				// collisions are resolved later
 				Metric prior = metrics.put(name, metric);
 


[03/19] flink git commit: [FLINK-8306] [kafka, tests] Fix mock verifications on final method

Posted by tz...@apache.org.
[FLINK-8306] [kafka, tests] Fix mock verifications on final method

Previously, the offset commit behaviour tests relied on verifying calls to
AbstractFetcher::commitInternalOffsetsToKafka(). That method is final,
so it cannot actually be mocked or verified.

This commit fixes that by implementing a proper mock AbstractFetcher,
which keeps track of the offset commits that pass through it.

This closes #5284.
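
As a quick illustration of the pitfall (a hedged sketch, not code from the patch): Mockito's default mock maker does not intercept final methods, so a mocked AbstractFetcher silently keeps the real commitInternalOffsetsToKafka(), and never()-style verifications against it pass vacuously.

	// before: verification against a final method -- compiles, but is never intercepted,
	// so verify(..., never()) passes no matter what the consumer does
	AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
	verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), any(KafkaCommitCallback.class));

	// after: a hand-written test double that records the commits itself
	MockFetcher<String> mockFetcher = new MockFetcher<>(state1, state2);
	// ... run the consumer against mockFetcher, complete a checkpoint ...
	assertEquals(state1, mockFetcher.getAndClearLastCommittedOffsets());
	assertEquals(1, mockFetcher.getCommitCount());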


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/69fff746
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/69fff746
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/69fff746

Branch: refs/heads/master
Commit: 69fff746ac99ec3ad428edf4500e38de17f2b797
Parents: 37cdaf9
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 08:45:32 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kafka/FlinkKafkaConsumerBaseTest.java       | 203 ++++++++++---------
 .../kafka/internals/AbstractFetcherTest.java    |  63 +-----
 .../kafka/testutils/TestSourceContext.java      |  87 ++++++++
 3 files changed, 199 insertions(+), 154 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/69fff746/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
index f8aeea2..7185ee8 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java
@@ -19,7 +19,6 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.ExecutionConfig;
-import org.apache.flink.api.common.accumulators.Accumulator;
 import org.apache.flink.api.common.state.KeyedStateStore;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.ListStateDescriptor;
@@ -48,7 +47,9 @@ import org.apache.flink.streaming.connectors.kafka.internals.KafkaCommitCallback
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;
 import org.apache.flink.streaming.connectors.kafka.testutils.TestPartitionDiscoverer;
+import org.apache.flink.streaming.connectors.kafka.testutils.TestSourceContext;
 import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
+import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
 import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.Preconditions;
@@ -56,11 +57,13 @@ import org.apache.flink.util.SerializedValue;
 
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Matchers;
-import org.mockito.Mockito;
+
+import javax.annotation.Nonnull;
 
 import java.io.Serializable;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -68,6 +71,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.flink.util.Preconditions.checkState;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.collection.IsIn.isIn;
@@ -75,14 +79,11 @@ import static org.hamcrest.collection.IsMapContaining.hasKey;
 import static org.hamcrest.core.IsNot.not;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyMap;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
 /**
  * Tests for the {@link FlinkKafkaConsumerBase}.
@@ -129,7 +130,11 @@ public class FlinkKafkaConsumerBaseTest {
 	@Test
 	public void ignoreCheckpointWhenNotRunning() throws Exception {
 		@SuppressWarnings("unchecked")
-		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>();
+		final MockFetcher<String> fetcher = new MockFetcher<>();
+		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
+				fetcher,
+				mock(AbstractPartitionDiscoverer.class),
+				false);
 
 		final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
 		setupConsumer(consumer, false, listState, true, 0, 1);
@@ -139,6 +144,11 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// no state should have been checkpointed
 		assertFalse(listState.get().iterator().hasNext());
+
+		// acknowledgement of the checkpoint should also not result in any offset commits
+		consumer.notifyCheckpointComplete(1L);
+		assertNull(fetcher.getAndClearLastCommittedOffsets());
+		assertEquals(0, fetcher.getCommitCount());
 	}
 
 	/**
@@ -265,10 +275,7 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// --------------------------------------------------------------------
 
-		final OneShotLatch runLatch = new OneShotLatch();
-		final OneShotLatch stopLatch = new OneShotLatch();
-		final AbstractFetcher<String, ?> fetcher = getRunnableMockFetcher(runLatch, stopLatch);
-		when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);
+		final MockFetcher<String> fetcher = new MockFetcher<>(state1, state2, state3);
 
 		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
 				fetcher,
@@ -283,17 +290,11 @@ public class FlinkKafkaConsumerBaseTest {
 		final CheckedThread runThread = new CheckedThread() {
 			@Override
 			public void go() throws Exception {
-				consumer.run(mock(SourceFunction.SourceContext.class));
-			}
-
-			@Override
-			public void sync() throws Exception {
-				stopLatch.trigger();
-				super.sync();
+				consumer.run(new TestSourceContext<>());
 			}
 		};
 		runThread.start();
-		runLatch.await();
+		fetcher.waitUntilRun();
 
 		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
@@ -329,6 +330,8 @@ public class FlinkKafkaConsumerBaseTest {
 		consumer.notifyCheckpointComplete(138L);
 		assertEquals(1, consumer.getPendingOffsetsToCommit().size());
 		assertTrue(consumer.getPendingOffsetsToCommit().containsKey(140L));
+		assertEquals(state1, fetcher.getAndClearLastCommittedOffsets());
+		assertEquals(1, fetcher.getCommitCount());
 
 		// checkpoint 3
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));
@@ -347,28 +350,15 @@ public class FlinkKafkaConsumerBaseTest {
 		// ack checkpoint 3, subsumes number 2
 		consumer.notifyCheckpointComplete(141L);
 		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
+		assertEquals(state3, fetcher.getAndClearLastCommittedOffsets());
+		assertEquals(2, fetcher.getCommitCount());
 
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
 		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
+		assertNull(fetcher.getAndClearLastCommittedOffsets());
+		assertEquals(2, fetcher.getCommitCount());
 
-		// create 500 snapshots
-		for (int i = 100; i < 600; i++) {
-			consumer.snapshotState(new StateSnapshotContextSynchronousImpl(i, i));
-			listState.clear();
-		}
-		assertEquals(FlinkKafkaConsumerBase.MAX_NUM_PENDING_CHECKPOINTS, consumer.getPendingOffsetsToCommit().size());
-
-		// commit only the second last
-		consumer.notifyCheckpointComplete(598);
-		assertEquals(1, consumer.getPendingOffsetsToCommit().size());
-
-		// access invalid checkpoint
-		consumer.notifyCheckpointComplete(590);
-
-		// and the last
-		consumer.notifyCheckpointComplete(599);
-		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
-
+		consumer.cancel();
 		runThread.sync();
 	}
 
@@ -393,10 +383,7 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// --------------------------------------------------------------------
 
-		final OneShotLatch runLatch = new OneShotLatch();
-		final OneShotLatch stopLatch = new OneShotLatch();
-		final AbstractFetcher<String, ?> fetcher = getRunnableMockFetcher(runLatch, stopLatch);
-		when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);
+		final MockFetcher<String> fetcher = new MockFetcher<>(state1, state2, state3);
 
 		final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
 				fetcher,
@@ -412,17 +399,11 @@ public class FlinkKafkaConsumerBaseTest {
 		final CheckedThread runThread = new CheckedThread() {
 			@Override
 			public void go() throws Exception {
-				consumer.run(mock(SourceFunction.SourceContext.class));
-			}
-
-			@Override
-			public void sync() throws Exception {
-				stopLatch.trigger();
-				super.sync();
+				consumer.run(new TestSourceContext<>());
 			}
 		};
 		runThread.start();
-		runLatch.await();
+		fetcher.waitUntilRun();
 
 		assertEquals(0, consumer.getPendingOffsetsToCommit().size());
 
@@ -454,7 +435,8 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// ack checkpoint 1
 		consumer.notifyCheckpointComplete(138L);
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
+		assertEquals(0, fetcher.getCommitCount());
+		assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed
 
 		// checkpoint 3
 		consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));
@@ -471,29 +453,15 @@ public class FlinkKafkaConsumerBaseTest {
 
 		// ack checkpoint 3, subsumes number 2
 		consumer.notifyCheckpointComplete(141L);
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
+		assertEquals(0, fetcher.getCommitCount());
+		assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed
 
 		consumer.notifyCheckpointComplete(666); // invalid checkpoint
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
-
-		// create 500 snapshots
-		for (int i = 100; i < 600; i++) {
-			consumer.snapshotState(new StateSnapshotContextSynchronousImpl(i, i));
-			listState.clear();
-		}
-		assertEquals(0, consumer.getPendingOffsetsToCommit().size()); // pending offsets to commit should not be updated
+		assertEquals(0, fetcher.getCommitCount());
+		assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed
 
-		// commit only the second last
-		consumer.notifyCheckpointComplete(598);
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
-
-		// access invalid checkpoint
-		consumer.notifyCheckpointComplete(590);
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
-
-		// and the last
-		consumer.notifyCheckpointComplete(599);
-		verify(fetcher, never()).commitInternalOffsetsToKafka(anyMap(), Matchers.any(KafkaCommitCallback.class)); // no offsets should be committed
+		consumer.cancel();
+		runThread.sync();
 	}
 
 	@Test
@@ -743,26 +711,6 @@ public class FlinkKafkaConsumerBaseTest {
 		}
 	}
 
-	/**
-	 * Returns a mock {@link AbstractFetcher}, with run / stop latches injected in
-	 * the {@link AbstractFetcher#runFetchLoop()} method.
-	 */
-	private static <T> AbstractFetcher<T, ?> getRunnableMockFetcher(
-			OneShotLatch runLatch,
-			OneShotLatch stopLatch) throws Exception {
-
-		@SuppressWarnings("unchecked")
-		final AbstractFetcher<T, ?> fetcher = mock(AbstractFetcher.class);
-
-		Mockito.doAnswer(invocationOnMock -> {
-			runLatch.trigger();
-			stopLatch.await();
-			return null;
-		}).when(fetcher).runFetchLoop();
-
-		return fetcher;
-	}
-
 	@SuppressWarnings("unchecked")
 	private static <T, S> void setupConsumer(
 			FlinkKafkaConsumerBase<T> consumer,
@@ -778,6 +726,77 @@ public class FlinkKafkaConsumerBaseTest {
 		consumer.open(new Configuration());
 	}
 
+	private static class MockFetcher<T> extends AbstractFetcher<T, Object> {
+
+		private final OneShotLatch runLatch = new OneShotLatch();
+		private final OneShotLatch stopLatch = new OneShotLatch();
+
+		private final ArrayDeque<HashMap<KafkaTopicPartition, Long>> stateSnapshotsToReturn = new ArrayDeque<>();
+
+		private Map<KafkaTopicPartition, Long> lastCommittedOffsets;
+		private int commitCount = 0;
+
+		@SafeVarargs
+		private MockFetcher(HashMap<KafkaTopicPartition, Long>... stateSnapshotsToReturn) throws Exception {
+			super(
+					new TestSourceContext<>(),
+					new HashMap<>(),
+					null,
+					null,
+					new TestProcessingTimeService(),
+					0,
+					MockFetcher.class.getClassLoader(),
+					false);
+
+			this.stateSnapshotsToReturn.addAll(Arrays.asList(stateSnapshotsToReturn));
+		}
+
+		@Override
+		protected void doCommitInternalOffsetsToKafka(
+				Map<KafkaTopicPartition, Long> offsets,
+				@Nonnull KafkaCommitCallback commitCallback) throws Exception {
+			this.lastCommittedOffsets = offsets;
+			this.commitCount++;
+			commitCallback.onSuccess();
+		}
+
+		@Override
+		public void runFetchLoop() throws Exception {
+			runLatch.trigger();
+			stopLatch.await();
+		}
+
+		@Override
+		public HashMap<KafkaTopicPartition, Long> snapshotCurrentState() {
+			checkState(!stateSnapshotsToReturn.isEmpty());
+			return stateSnapshotsToReturn.poll();
+		}
+
+		@Override
+		protected Object createKafkaPartitionHandle(KafkaTopicPartition partition) {
+			throw new UnsupportedOperationException();
+		}
+
+		@Override
+		public void cancel() {
+			stopLatch.trigger();
+		}
+
+		private void waitUntilRun() throws InterruptedException {
+			runLatch.await();
+		}
+
+		private Map<KafkaTopicPartition, Long> getAndClearLastCommittedOffsets() {
+			Map<KafkaTopicPartition, Long> offsets = this.lastCommittedOffsets;
+			this.lastCommittedOffsets = null;
+			return offsets;
+		}
+
+		private int getCommitCount() {
+			return commitCount;
+		}
+	}
+
 	private static class MockRuntimeContext extends StreamingRuntimeContext {
 
 		private final boolean isCheckpointingEnabled;
@@ -793,7 +812,7 @@ public class FlinkKafkaConsumerBaseTest {
 			super(
 				new MockStreamOperator(),
 				new MockEnvironment("mockTask", 4 * MemoryManager.DEFAULT_PAGE_SIZE, null, 16),
-				Collections.<String, Accumulator<?, ?>>emptyMap());
+				Collections.emptyMap());
 
 			this.isCheckpointingEnabled = isCheckpointingEnabled;
 			this.numParallelSubtasks = numParallelSubtasks;

http://git-wip-us.apache.org/repos/asf/flink/blob/69fff746/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
index e4a58dd..6fe1d6f 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java
@@ -22,7 +22,7 @@ import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext;
 import org.apache.flink.streaming.api.watermark.Watermark;
-import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.flink.streaming.connectors.kafka.testutils.TestSourceContext;
 import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
 import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
 import org.apache.flink.util.SerializedValue;
@@ -444,67 +444,6 @@ public class AbstractFetcherTest {
 
 	// ------------------------------------------------------------------------
 
-	private static final class TestSourceContext<T> implements SourceContext<T> {
-
-		private final Object checkpointLock = new Object();
-		private final Object watermarkLock = new Object();
-
-		private volatile StreamRecord<T> latestElement;
-		private volatile Watermark currentWatermark;
-
-		@Override
-		public void collect(T element) {
-			this.latestElement = new StreamRecord<>(element);
-		}
-
-		@Override
-		public void collectWithTimestamp(T element, long timestamp) {
-			this.latestElement = new StreamRecord<>(element, timestamp);
-		}
-
-		@Override
-		public void emitWatermark(Watermark mark) {
-			synchronized (watermarkLock) {
-				currentWatermark = mark;
-				watermarkLock.notifyAll();
-			}
-		}
-
-		@Override
-		public void markAsTemporarilyIdle() {
-			throw new UnsupportedOperationException();
-		}
-
-		@Override
-		public Object getCheckpointLock() {
-			return checkpointLock;
-		}
-
-		@Override
-		public void close() {}
-
-		public StreamRecord<T> getLatestElement() {
-			return latestElement;
-		}
-
-		public boolean hasWatermark() {
-			return currentWatermark != null;
-		}
-
-		public Watermark getLatestWatermark() throws InterruptedException {
-			synchronized (watermarkLock) {
-				while (currentWatermark == null) {
-					watermarkLock.wait();
-				}
-				Watermark wm = currentWatermark;
-				currentWatermark = null;
-				return wm;
-			}
-		}
-	}
-
-	// ------------------------------------------------------------------------
-
 	private static class PeriodicTestExtractor implements AssignerWithPeriodicWatermarks<Long> {
 
 		private volatile long maxTimestamp = Long.MIN_VALUE;

http://git-wip-us.apache.org/repos/asf/flink/blob/69fff746/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestSourceContext.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestSourceContext.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestSourceContext.java
new file mode 100644
index 0000000..2a96a68
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestSourceContext.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka.testutils;
+
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.api.watermark.Watermark;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+
+/**
+ * Test {@link org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext}.
+ */
+public final class TestSourceContext<T> implements SourceFunction.SourceContext<T> {
+
+	private final Object checkpointLock = new Object();
+	private final Object watermarkLock = new Object();
+
+	private volatile StreamRecord<T> latestElement;
+	private volatile Watermark currentWatermark;
+
+	@Override
+	public void collect(T element) {
+		this.latestElement = new StreamRecord<>(element);
+	}
+
+	@Override
+	public void collectWithTimestamp(T element, long timestamp) {
+		this.latestElement = new StreamRecord<>(element, timestamp);
+	}
+
+	@Override
+	public void emitWatermark(Watermark mark) {
+		synchronized (watermarkLock) {
+			currentWatermark = mark;
+			watermarkLock.notifyAll();
+		}
+	}
+
+	@Override
+	public void markAsTemporarilyIdle() {
+		// do nothing
+	}
+
+	@Override
+	public Object getCheckpointLock() {
+		return checkpointLock;
+	}
+
+	@Override
+	public void close() {
+		// do nothing
+	}
+
+	public StreamRecord<T> getLatestElement() {
+		return latestElement;
+	}
+
+	public boolean hasWatermark() {
+		return currentWatermark != null;
+	}
+
+	public Watermark getLatestWatermark() throws InterruptedException {
+		synchronized (watermarkLock) {
+			while (currentWatermark == null) {
+				watermarkLock.wait();
+			}
+			Watermark wm = currentWatermark;
+			currentWatermark = null;
+			return wm;
+		}
+	}
+}
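
A quick usage sketch for the extracted utility (an editorial example, not part of the patch; all methods are the ones defined above):

	TestSourceContext<String> ctx = new TestSourceContext<>();

	// records handed to collect() are wrapped as StreamRecords
	ctx.collect("hello");
	assertEquals("hello", ctx.getLatestElement().getValue());

	// watermarks are latched and can be awaited from another thread
	ctx.emitWatermark(new Watermark(100L));
	assertTrue(ctx.hasWatermark());
	assertEquals(100L, ctx.getLatestWatermark().getTimestamp());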


[05/19] flink git commit: [FLINK-8271] [kinesis] Remove usage of deprecated Kinesis APIs

Posted by tz...@apache.org.
[FLINK-8271] [kinesis] Remove usage of deprecated Kinesis APIs

This closes #5171.
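
The gist of the migration, as a minimal sketch (AWS SDK for Java 1.x; the credentialsProvider and clientConfig variables are assumed to be set up as in the diff below):

	// deprecated: direct construction plus mutable setRegion()/setEndpoint()
	AmazonKinesisClient client = new AmazonKinesisClient(credentialsProvider, clientConfig);
	client.setRegion(Region.getRegion(Regions.EU_CENTRAL_1));

	// replacement: immutable client assembled through the builder
	AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard()
			.withCredentials(credentialsProvider)
			.withClientConfiguration(clientConfig)
			.withRegion(Regions.EU_CENTRAL_1)
			.build();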


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d53a722e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d53a722e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d53a722e

Branch: refs/heads/master
Commit: d53a722e769e8ff6009d53208bf6702ec3e4a6f5
Parents: 0692275
Author: Bowen Li <bo...@gmail.com>
Authored: Tue Jan 2 11:21:28 2018 -0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../connectors/kinesis/proxy/KinesisProxy.java  |  4 +--
 .../connectors/kinesis/util/AWSUtil.java        | 34 ++++++++++++--------
 .../manualtests/ManualExactlyOnceTest.java      |  7 ++--
 ...nualExactlyOnceWithStreamReshardingTest.java |  6 ++--
 4 files changed, 29 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/d53a722e/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 7daaad2..6eb8134 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -22,7 +22,7 @@ import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
 
 import com.amazonaws.AmazonServiceException;
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
 import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
 import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import com.amazonaws.services.kinesis.model.GetRecordsRequest;
@@ -65,7 +65,7 @@ public class KinesisProxy implements KinesisProxyInterface {
 	private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
 
 	/** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
-	private final AmazonKinesisClient kinesisClient;
+	private final AmazonKinesis kinesisClient;
 
 	/** Random seed used to calculate backoff jitter for Kinesis operations. */
 	private static final Random seed = new Random();

http://git-wip-us.apache.org/repos/asf/flink/blob/d53a722e/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
index 5670526..c2dc5d3 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
@@ -30,9 +30,10 @@ import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
 import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
 import com.amazonaws.auth.profile.ProfileCredentialsProvider;
-import com.amazonaws.regions.Region;
+import com.amazonaws.client.builder.AwsClientBuilder;
 import com.amazonaws.regions.Regions;
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
 
 import java.util.Properties;
 
@@ -40,27 +41,34 @@ import java.util.Properties;
  * Some utilities specific to Amazon Web Service.
  */
 public class AWSUtil {
+	/** Used for formatting Flink-specific user agent string when creating Kinesis client. */
+	private static final String USER_AGENT_FORMAT = "Apache Flink %s (%s) Kinesis Connector";
 
 	/**
-	 * Creates an Amazon Kinesis Client.
+	 * Creates an AmazonKinesis client.
 	 * @param configProps configuration properties containing the access key, secret key, and region
-	 * @return a new Amazon Kinesis Client
+	 * @return a new AmazonKinesis client
 	 */
-	public static AmazonKinesisClient createKinesisClient(Properties configProps) {
+	public static AmazonKinesis createKinesisClient(Properties configProps) {
 		// set a Flink-specific user agent
-		ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
-		awsClientConfig.setUserAgent("Apache Flink " + EnvironmentInformation.getVersion() +
-			" (" + EnvironmentInformation.getRevisionInformation().commitId + ") Kinesis Connector");
+		ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig()
+				.withUserAgentPrefix(String.format(USER_AGENT_FORMAT,
+														EnvironmentInformation.getVersion(),
+														EnvironmentInformation.getRevisionInformation().commitId));
 
 		// utilize automatic refreshment of credentials by directly passing the AWSCredentialsProvider
-		AmazonKinesisClient client = new AmazonKinesisClient(
-			AWSUtil.getCredentialsProvider(configProps), awsClientConfig);
+		AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard()
+				.withCredentials(AWSUtil.getCredentialsProvider(configProps))
+				.withClientConfiguration(awsClientConfig)
+				.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION)));
 
-		client.setRegion(Region.getRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION))));
 		if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
-			client.setEndpoint(configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT));
+			// Set signingRegion to null, to facilitate mocking Kinesis for local tests
+			builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
+													configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT),
+													null));
 		}
-		return client;
+		return builder.build();
 	}
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/d53a722e/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
index 67ddad2..963002f 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
@@ -27,7 +27,7 @@ import org.apache.flink.streaming.connectors.kinesis.testutils.ExactlyOnceValida
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisEventsGeneratorProducerThread;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
 
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
 import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +37,7 @@ import java.util.UUID;
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
- * This test first starts a data generator, producing data into kinesis.
+ * This test first starts a data generator, producing data into Kinesis.
  * Then, it starts a consuming topology, ensuring that all records up to a certain
  * point have been seen.
  *
@@ -45,7 +45,6 @@ import java.util.concurrent.atomic.AtomicReference;
  * --region eu-central-1 --accessKey X --secretKey X
  */
 public class ManualExactlyOnceTest {
-
 	private static final Logger LOG = LoggerFactory.getLogger(ManualExactlyOnceTest.class);
 
 	static final int TOTAL_EVENT_COUNT = 1000; // the producer writes one per 10 ms, so it runs for 10k ms = 10 seconds
@@ -63,7 +62,7 @@ public class ManualExactlyOnceTest {
 		configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
 		configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
 		configProps.setProperty(AWSConfigConstants.AWS_REGION, region);
-		AmazonKinesisClient client = AWSUtil.createKinesisClient(configProps);
+		AmazonKinesis client = AWSUtil.createKinesisClient(configProps);
 
 		// create a stream for the test:
 		client.createStream(streamName, 1);

http://git-wip-us.apache.org/repos/asf/flink/blob/d53a722e/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
index cef8720..93b9caf 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
@@ -27,7 +27,7 @@ import org.apache.flink.streaming.connectors.kinesis.testutils.ExactlyOnceValida
 import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
 
-import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
 import com.amazonaws.services.kinesis.model.DescribeStreamResult;
 import com.amazonaws.services.kinesis.model.LimitExceededException;
 import com.amazonaws.services.kinesis.model.PutRecordsRequest;
@@ -74,7 +74,7 @@ public class ManualExactlyOnceWithStreamReshardingTest {
 		configProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
 		configProps.setProperty(ConsumerConfigConstants.AWS_REGION, region);
 		configProps.setProperty(ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS, "0");
-		final AmazonKinesisClient client = AWSUtil.createKinesisClient(configProps);
+		final AmazonKinesis client = AWSUtil.createKinesisClient(configProps);
 
 		// the stream is first created with 1 shard
 		client.createStream(streamName, 1);
@@ -107,7 +107,7 @@ public class ManualExactlyOnceWithStreamReshardingTest {
 			Runnable manualGenerate = new Runnable() {
 				@Override
 				public void run() {
-					AmazonKinesisClient client = AWSUtil.createKinesisClient(configProps);
+					AmazonKinesis client = AWSUtil.createKinesisClient(configProps);
 					int count = 0;
 					final int batchSize = 30;
 					while (true) {


[07/19] flink git commit: [FLINK-8324] [kafka, metrics] Add new offsets metrics that can be scoped by topic and partition

Posted by tz...@apache.org.
[FLINK-8324] [kafka, metrics] Add new offsets metrics that can be scoped by topic and partition


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6f6b3c8f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6f6b3c8f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6f6b3c8f

Branch: refs/heads/master
Commit: 6f6b3c8f030d11b08b05a9a93de02c787b102499
Parents: e304600
Author: Tony Wei <to...@gmail.com>
Authored: Thu Dec 28 10:19:24 2017 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 docs/monitoring/metrics.md                              | 12 ++++++++++++
 .../connectors/kafka/internals/AbstractFetcher.java     |  6 ++++++
 2 files changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/6f6b3c8f/docs/monitoring/metrics.md
----------------------------------------------------------------------
diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index a820821..5c4b85f 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1290,6 +1290,18 @@ Thus, in order to infer the metric identifier:
        <td>Kafka offset commit failure count if Kafka commit is turned on and checkpointing is enabled.</td>
        <td>Counter</td>
     </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>currentOffsets</td>
+      <td>The current Kafka offset. This metric has two user-scope variables: topic and partition, which can be used to specify a particular metric by topic name and partition id.</td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>committedOffsets</td>
+      <td>Kafka's successfully committed offset, if Kafka commit is turned on and checkpointing is enabled. This metric has two user-scope variables: topic and partition, which can be used to specify a particular metric by topic name and partition id.</td>
+      <td>Gauge</td>
+    </tr>
   </tbody>
 </table>
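For readers inferring identifiers: with the default scope formats, the new gauges should surface under the operator scope with the two variables expanded into the name, i.e. something of the shape <operator_scope>.topic.<topic_name>.partition.<partition_id>.currentOffsets (the exact prefix depends on the configured scope format and reporter).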
 

http://git-wip-us.apache.org/repos/asf/flink/blob/6f6b3c8f/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
index 5240326..258e3dc 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
@@ -570,6 +570,12 @@ public abstract class AbstractFetcher<T, KPH> {
 		for (KafkaTopicPartitionState<KPH> ktp : subscribedPartitionStates) {
 			currentOffsets.gauge(ktp.getTopic() + "-" + ktp.getPartition(), new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
 			committedOffsets.gauge(ktp.getTopic() + "-" + ktp.getPartition(), new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
+
+			MetricGroup topicPartitionGroup = metricGroup
+				.addGroup("topic", ktp.getTopic())
+				.addGroup("partition", Integer.toString(ktp.getPartition()));
+			topicPartitionGroup.gauge("currentOffsets", new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
+			topicPartitionGroup.gauge("committedOffsets", new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
 		}
 	}
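A minimal, self-contained sketch of the same pattern in user code (not the connector's code): addGroup(key, value) creates a user-scope variable that reporters can expand, and gauge(...) registers the metric beneath it. The topic and partition values here are assumed examples.

    import org.apache.flink.api.common.functions.RichMapFunction;
    import org.apache.flink.configuration.Configuration;

    public class OffsetReportingMapper extends RichMapFunction<String, String> {
        private transient long processed;

        @Override
        public void open(Configuration parameters) {
            getRuntimeContext().getMetricGroup()
                .addGroup("topic", "my-topic")      // user-scope variable "topic"
                .addGroup("partition", "0")         // user-scope variable "partition"
                .gauge("currentOffsets", () -> processed);
        }

        @Override
        public String map(String value) {
            processed++;
            return value;
        }
    }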
 


[02/19] flink git commit: [hotfix] [kafka] Remove stale comment on publishing procedures of AbstractFetcher

Posted by tz...@apache.org.
[hotfix] [kafka] Remove stale comment on publishing procedures of AbstractFetcher

The previous comment mentioned "only now will the fetcher return at
least the restored offsets when calling snapshotCurrentState()". This is
a remnant of an earlier fetcher initialization behaviour, in which the
fetcher wasn't directly seeded with restored offsets on instantiation.

Since this is no longer true, this commit fixes the stale comment to
avoid confusion.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ac0facc8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ac0facc8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ac0facc8

Branch: refs/heads/master
Commit: ac0facc8754ab8bf41f6be96b4241e0a9078f52f
Parents: 69fff74
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Wed Dec 20 11:54:40 2017 -0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kafka/FlinkKafkaConsumerBase.java           | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/ac0facc8/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
index 7a87f4d..2193d75 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
@@ -547,8 +547,12 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 			sourceContext.markAsTemporarilyIdle();
 		}
 
-		// create the fetcher that will communicate with the Kafka brokers
-		final AbstractFetcher<T, ?> fetcher = createFetcher(
+		// from this point forward:
+		//   - 'snapshotState' will draw offsets from the fetcher,
+		//     instead of building them from `subscribedPartitionsToStartOffsets`
+		//   - 'notifyCheckpointComplete' will start to do work (i.e. commit offsets to
+		//     Kafka through the fetcher, if configured to do so)
+		this.kafkaFetcher = createFetcher(
 				sourceContext,
 				subscribedPartitionsToStartOffsets,
 				periodicWatermarkAssigner,
@@ -556,12 +560,6 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 				(StreamingRuntimeContext) getRuntimeContext(),
 				offsetCommitMode);
 
-		// publish the reference, for snapshot-, commit-, and cancel calls
-		// IMPORTANT: We can only do that now, because only now will calls to
-		//            the fetchers 'snapshotCurrentState()' method return at least
-		//            the restored offsets
-		this.kafkaFetcher = fetcher;
-
 		if (!running) {
 			return;
 		}
@@ -600,7 +598,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 
 							// no need to add the discovered partitions if we were closed during the meantime
 							if (running && !discoveredPartitions.isEmpty()) {
-								fetcher.addDiscoveredPartitions(discoveredPartitions);
+								kafkaFetcher.addDiscoveredPartitions(discoveredPartitions);
 							}
 
 							// do not waste any time sleeping if we're not running anymore
@@ -623,7 +621,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 			});
 
 			discoveryLoopThread.start();
-			fetcher.runFetchLoop();
+			kafkaFetcher.runFetchLoop();
 
 			// --------------------------------------------------------------------
 
@@ -640,7 +638,7 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 			// won't be using the discoverer
 			partitionDiscoverer.close();
 
-			fetcher.runFetchLoop();
+			kafkaFetcher.runFetchLoop();
 		}
 	}
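Paraphrased (not the exact method body), the guard that makes the immediate field assignment safe looks like this in snapshotState():

    // inside FlinkKafkaConsumerBase.snapshotState(), paraphrased:
    if (kafkaFetcher == null) {
        // the fetcher was never created: fall back to the restored /
        // assigned offsets in subscribedPartitionsToStartOffsets
    } else {
        // the fetcher is seeded with restored offsets on instantiation,
        // so its snapshot always contains at least the restored state
        HashMap<KafkaTopicPartition, Long> offsets = kafkaFetcher.snapshotCurrentState();
    }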
 


[18/19] flink git commit: [FLINK-8276] [kafka] Properly annotate APIs for Kafka connector

Posted by tz...@apache.org.
[FLINK-8276] [kafka] Properly annotate APIs for Kafka connector

This closes #5173.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4ceabed9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4ceabed9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4ceabed9

Branch: refs/heads/master
Commit: 4ceabed9ad108acd6b67ec59e2f079669ab73046
Parents: 9b5fce6
Author: zhangminglei <zm...@163.com>
Authored: Wed Jan 10 17:31:19 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:29 2018 +0800

----------------------------------------------------------------------
 .../flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java   | 1 +
 .../flink/streaming/connectors/kafka/FlinkKafkaProducer010.java   | 2 ++
 .../flink/streaming/connectors/kafka/Kafka010AvroTableSource.java | 2 ++
 .../flink/streaming/connectors/kafka/Kafka010JsonTableSink.java   | 2 ++
 .../flink/streaming/connectors/kafka/Kafka010JsonTableSource.java | 2 ++
 .../flink/streaming/connectors/kafka/Kafka010TableSource.java     | 2 ++
 .../streaming/connectors/kafka/internal/Kafka010Fetcher.java      | 2 ++
 .../connectors/kafka/internal/Kafka010PartitionDiscoverer.java    | 2 ++
 .../connectors/kafka/internal/KafkaConsumerCallBridge010.java     | 3 +++
 .../flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java  | 3 +++
 .../flink/streaming/connectors/kafka/FlinkKafka011Exception.java  | 2 ++
 .../flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java   | 1 +
 .../flink/streaming/connectors/kafka/Kafka011AvroTableSource.java | 2 ++
 .../flink/streaming/connectors/kafka/Kafka011JsonTableSource.java | 2 ++
 .../flink/streaming/connectors/kafka/Kafka011TableSource.java     | 2 ++
 .../streaming/connectors/kafka/internal/FlinkKafkaProducer.java   | 2 ++
 .../connectors/kafka/internal/TransactionalIdsGenerator.java      | 3 +++
 .../kafka/internal/metrics/KafkaMetricMuttableWrapper.java        | 2 ++
 .../flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java    | 1 +
 .../flink/streaming/connectors/kafka/FlinkKafkaProducer08.java    | 2 ++
 .../flink/streaming/connectors/kafka/Kafka08AvroTableSource.java  | 2 ++
 .../flink/streaming/connectors/kafka/Kafka08JsonTableSink.java    | 2 ++
 .../flink/streaming/connectors/kafka/Kafka08JsonTableSource.java  | 2 ++
 .../flink/streaming/connectors/kafka/Kafka08TableSource.java      | 2 ++
 .../streaming/connectors/kafka/internals/Kafka08Fetcher.java      | 2 ++
 .../connectors/kafka/internals/Kafka08PartitionDiscoverer.java    | 2 ++
 .../streaming/connectors/kafka/internals/KillerWatchDog.java      | 3 +++
 .../connectors/kafka/internals/PartitionInfoFetcher.java          | 3 +++
 .../connectors/kafka/internals/PeriodicOffsetCommitter.java       | 3 +++
 .../connectors/kafka/internals/SimpleConsumerThread.java          | 2 ++
 .../connectors/kafka/internals/ZookeeperOffsetHandler.java        | 2 ++
 .../flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java    | 1 +
 .../flink/streaming/connectors/kafka/FlinkKafkaProducer09.java    | 2 ++
 .../flink/streaming/connectors/kafka/Kafka09AvroTableSource.java  | 2 ++
 .../flink/streaming/connectors/kafka/Kafka09JsonTableSink.java    | 2 ++
 .../flink/streaming/connectors/kafka/Kafka09JsonTableSource.java  | 2 ++
 .../flink/streaming/connectors/kafka/Kafka09TableSource.java      | 2 ++
 .../flink/streaming/connectors/kafka/internal/Handover.java       | 2 ++
 .../flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java | 2 ++
 .../connectors/kafka/internal/Kafka09PartitionDiscoverer.java     | 2 ++
 .../connectors/kafka/internal/KafkaConsumerCallBridge.java        | 3 +++
 .../streaming/connectors/kafka/internal/KafkaConsumerThread.java  | 2 ++
 .../flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java  | 2 ++
 .../flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java  | 2 ++
 .../flink/streaming/connectors/kafka/KafkaAvroTableSource.java    | 2 ++
 .../flink/streaming/connectors/kafka/KafkaJsonTableSink.java      | 2 ++
 .../flink/streaming/connectors/kafka/KafkaJsonTableSource.java    | 2 ++
 .../apache/flink/streaming/connectors/kafka/KafkaTableSink.java   | 2 ++
 .../apache/flink/streaming/connectors/kafka/KafkaTableSource.java | 2 ++
 .../flink/streaming/connectors/kafka/config/OffsetCommitMode.java | 3 +++
 .../streaming/connectors/kafka/config/OffsetCommitModes.java      | 3 +++
 .../flink/streaming/connectors/kafka/config/StartupMode.java      | 2 ++
 .../streaming/connectors/kafka/internals/AbstractFetcher.java     | 2 ++
 .../connectors/kafka/internals/AbstractPartitionDiscoverer.java   | 3 +++
 .../connectors/kafka/internals/ClosableBlockingQueue.java         | 3 +++
 .../streaming/connectors/kafka/internals/ExceptionProxy.java      | 3 +++
 .../streaming/connectors/kafka/internals/KafkaCommitCallback.java | 3 +++
 .../streaming/connectors/kafka/internals/KafkaTopicPartition.java | 3 +++
 .../connectors/kafka/internals/KafkaTopicPartitionAssigner.java   | 3 +++
 .../connectors/kafka/internals/KafkaTopicPartitionLeader.java     | 3 +++
 .../connectors/kafka/internals/KafkaTopicPartitionState.java      | 3 +++
 .../kafka/internals/KafkaTopicPartitionStateSentinel.java         | 3 +++
 .../internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java | 2 ++
 .../KafkaTopicPartitionStateWithPunctuatedWatermarks.java         | 2 ++
 .../connectors/kafka/internals/KafkaTopicsDescriptor.java         | 3 +++
 .../connectors/kafka/internals/metrics/KafkaMetricWrapper.java    | 2 ++
 .../connectors/kafka/partitioner/FlinkFixedPartitioner.java       | 2 ++
 .../kafka/partitioner/FlinkKafkaDelegatePartitioner.java          | 3 +++
 .../connectors/kafka/partitioner/FlinkKafkaPartitioner.java       | 3 +++
 .../streaming/connectors/kafka/partitioner/KafkaPartitioner.java  | 3 +++
 .../streaming/util/serialization/JSONDeserializationSchema.java   | 2 ++
 .../util/serialization/JSONKeyValueDeserializationSchema.java     | 2 ++
 .../util/serialization/JsonRowDeserializationSchema.java          | 2 ++
 .../streaming/util/serialization/JsonRowSerializationSchema.java  | 2 ++
 .../streaming/util/serialization/KeyedDeserializationSchema.java  | 2 ++
 .../util/serialization/KeyedDeserializationSchemaWrapper.java     | 2 ++
 .../streaming/util/serialization/KeyedSerializationSchema.java    | 3 +++
 .../util/serialization/KeyedSerializationSchemaWrapper.java       | 2 ++
 .../serialization/TypeInformationKeyValueSerializationSchema.java | 2 ++
 79 files changed, 177 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
index 6fb63e1..d0b61e2 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer010.java
@@ -59,6 +59,7 @@ import java.util.regex.Pattern;
  * <p>Please refer to Kafka's documentation for the available configuration properties:
  * http://kafka.apache.org/documentation.html#newconsumerconfigs</p>
  */
+@PublicEvolving
 public class FlinkKafkaConsumer010<T> extends FlinkKafkaConsumer09<T> {
 
 	private static final long serialVersionUID = 2324564345203409112L;
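As a usage sketch (topic, servers, group id, and schema are assumed example values, not part of this commit), the now-@PublicEvolving consumer is constructed like any other source:

    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "my-group");

    DataStream<String> stream = env.addSource(
        new FlinkKafkaConsumer010<>("my-topic", new SimpleStringSchema(), props));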

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
index e721340..0c0cc65 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSink;
@@ -39,6 +40,7 @@ import java.util.Properties;
 /**
  * Flink Sink to produce data into a Kafka topic. This producer is compatible with Kafka 0.10.x
  */
+@PublicEvolving
 public class FlinkKafkaProducer010<T> extends FlinkKafkaProducer09<T> {
 
 	private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
index 660162a..f759f61 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -35,6 +36,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.10.
  */
+@PublicEvolving
 public class Kafka010AvroTableSource extends KafkaAvroTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index 431ace0..ef33cd5 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
@@ -28,6 +29,7 @@ import java.util.Properties;
 /**
  * Kafka 0.10 {@link KafkaTableSink} that serializes data in JSON format.
  */
+@PublicEvolving
 public class Kafka010JsonTableSink extends KafkaJsonTableSink {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
index 5f9984e..bda236f 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,6 +33,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.10.
  */
+@PublicEvolving
 public class Kafka010JsonTableSource extends KafkaJsonTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
index 379c562..5a02227 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.table.api.TableSchema;
@@ -29,6 +30,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.10.
  */
+@PublicEvolving
 public abstract class Kafka010TableSource extends KafkaTableSource {
 
 	// The deserialization schema for the Kafka records

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
index eb4dfee..cddccdc 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010Fetcher.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
@@ -42,6 +43,7 @@ import java.util.Properties;
  *
  * @param <T> The type of elements produced by the fetcher.
  */
+@Internal
 public class Kafka010Fetcher<T> extends Kafka09Fetcher<T> {
 
 	public Kafka010Fetcher(

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010PartitionDiscoverer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010PartitionDiscoverer.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010PartitionDiscoverer.java
index 0c10f40..66c1535 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010PartitionDiscoverer.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka010PartitionDiscoverer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;
 
 import java.util.Properties;
@@ -25,6 +26,7 @@ import java.util.Properties;
  * A partition discoverer that can be used to discover topic and partition metadata
  * from Kafka brokers via the Kafka 0.10 high-level consumer API.
  */
+@Internal
 public class Kafka010PartitionDiscoverer extends Kafka09PartitionDiscoverer {
 
 	public Kafka010PartitionDiscoverer(

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
index b621140..5815bfa 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge010.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
+
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.TopicPartition;
 
@@ -32,6 +34,7 @@ import java.util.List;
  *
  * <p>Because of that, we need two versions whose compiled code goes against different method signatures.
  */
+@Internal
 public class KafkaConsumerCallBridge010 extends KafkaConsumerCallBridge {
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java
index 4f5de4f..a91e636 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011ErrorCode.java
@@ -17,9 +17,12 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 /**
  * Error codes used in {@link FlinkKafka011Exception}.
  */
+@PublicEvolving
 public enum FlinkKafka011ErrorCode {
 	PRODUCERS_POOL_EMPTY,
 	EXTERNAL_ERROR

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
index 6b16e53..6189d8a 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
@@ -17,11 +17,13 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.util.FlinkException;
 
 /**
  * Exception used by {@link FlinkKafkaProducer011} and {@link FlinkKafkaConsumer011}.
  */
+@PublicEvolving
 public class FlinkKafka011Exception extends FlinkException {
 
 	private final FlinkKafka011ErrorCode errorCode;
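A hedged usage sketch tying the error code to the exception (the constructor shape is inferred from the field above, so treat it as an assumption):

    throw new FlinkKafka011Exception(
        FlinkKafka011ErrorCode.PRODUCERS_POOL_EMPTY,
        "Too many concurrent checkpoints: the producers pool is exhausted");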

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java
index c40463e..2b8d3f3 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer011.java
@@ -44,6 +44,7 @@ import java.util.regex.Pattern;
  * <p>Please refer to Kafka's documentation for the available configuration properties:
  * http://kafka.apache.org/documentation.html#newconsumerconfigs</p>
  */
+@PublicEvolving
 public class FlinkKafkaConsumer011<T> extends FlinkKafkaConsumer010<T> {
 
 	private static final long serialVersionUID = 2324564345203409112L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
index a9a109c..f4484f6 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -35,6 +36,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.11.
  */
+@PublicEvolving
 public class Kafka011AvroTableSource extends KafkaAvroTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
index cee7c61..a012f5d 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,6 +33,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.11.
  */
+@PublicEvolving
 public class Kafka011JsonTableSource extends KafkaJsonTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
index 8c40318..6c9c37d 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.table.api.TableSchema;
@@ -29,6 +30,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.11.
  */
+@PublicEvolving
 public abstract class Kafka011TableSource extends KafkaTableSource {
 
 	// The deserialization schema for the Kafka records

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
index 2f58d56..8faff38 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.util.Preconditions;
 
@@ -101,6 +102,7 @@ import java.util.concurrent.TimeUnit;
  * required changes via Java Reflection API. It might not be the prettiest solution. An alternative would be to
  * re-implement whole Kafka's 0.11 REST API client on our own.
  */
+@PublicEvolving
 public class FlinkKafkaProducer<K, V> implements Producer<K, V> {
 	private static final Logger LOG = LoggerFactory.getLogger(FlinkKafkaProducer.class);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/TransactionalIdsGenerator.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/TransactionalIdsGenerator.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/TransactionalIdsGenerator.java
index 2c4e6c9..ffebf56 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/TransactionalIdsGenerator.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/TransactionalIdsGenerator.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
+
 import java.util.HashSet;
 import java.util.Set;
 
@@ -34,6 +36,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * </ul>
  * In other words, any particular generated id will always be assigned to one and only one subtask.
  */
+@Internal
 public class TransactionalIdsGenerator {
 	private final String prefix;
 	private final int subtaskIndex;
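A hypothetical illustration of the one-id-to-one-subtask property described above (the helper name and range formula are assumptions for illustration, not the generator's actual code):

    // subtask i of N, with a pool of P ids per subtask:
    // ids [i * P, (i + 1) * P) belong to subtask i and to no other subtask
    static java.util.Set<String> idsFor(String prefix, int subtaskIndex, int poolSize) {
        java.util.Set<String> ids = new java.util.HashSet<>();
        for (long id = (long) subtaskIndex * poolSize;
                id < (long) (subtaskIndex + 1) * poolSize; id++) {
            ids.add(prefix + "-" + id);
        }
        return ids;
    }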

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/metrics/KafkaMetricMuttableWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/metrics/KafkaMetricMuttableWrapper.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/metrics/KafkaMetricMuttableWrapper.java
index a22ff5c..3ff6363 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/metrics/KafkaMetricMuttableWrapper.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/metrics/KafkaMetricMuttableWrapper.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal.metrics;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.Gauge;
 
 import org.apache.kafka.common.Metric;
@@ -25,6 +26,7 @@ import org.apache.kafka.common.Metric;
 /**
  * Gauge for getting the current value of a Kafka metric.
  */
+@Internal
 public class KafkaMetricMuttableWrapper implements Gauge<Double> {
 	private org.apache.kafka.common.Metric kafkaMetric;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
index f362046..3718476 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
@@ -81,6 +81,7 @@ import static org.apache.flink.util.PropertiesUtil.getLong;
  * <p>When using a Kafka topic to send data between Flink jobs, we recommend using the
  * {@see TypeInformationSerializationSchema} and {@see TypeInformationKeyValueSerializationSchema}.</p>
  */
+@PublicEvolving
 public class FlinkKafkaConsumer08<T> extends FlinkKafkaConsumerBase<T> {
 
 	private static final long serialVersionUID = -6272159445203409112L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
index fa80252..6c8690e 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
@@ -36,6 +37,7 @@ import java.util.Properties;
  *
  * @param <IN> Type of the messages to write into Kafka.
  */
+@PublicEvolving
 public class FlinkKafkaProducer08<IN> extends FlinkKafkaProducerBase<IN>  {
 
 	private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
index 9105c73..3c16722 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -35,6 +36,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.8.
  */
+@PublicEvolving
 public class Kafka08AvroTableSource extends KafkaAvroTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
index 39d5cb2..c60288d 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
@@ -30,6 +31,7 @@ import java.util.Properties;
 /**
  * Kafka 0.8 {@link KafkaTableSink} that serializes data in JSON format.
  */
+@PublicEvolving
 public class Kafka08JsonTableSink extends KafkaJsonTableSink {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
index 639093d..7387056 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,6 +33,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.8.
  */
+@PublicEvolving
 public class Kafka08JsonTableSource extends KafkaJsonTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
index 3bb6a94..918964d 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.table.api.TableSchema;
@@ -29,6 +30,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.8.
  */
+@PublicEvolving
 public abstract class Kafka08TableSource extends KafkaTableSource {
 
 	// The deserialization schema for the Kafka records

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
index bd62bb8..23d87d7 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
@@ -57,6 +58,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * @param <T> The type of elements produced by the fetcher.
  */
+@Internal
 public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 
 	static final KafkaTopicPartitionState<TopicAndPartition> MARKER =

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08PartitionDiscoverer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08PartitionDiscoverer.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08PartitionDiscoverer.java
index 5f7c370..5eac6ed 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08PartitionDiscoverer.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08PartitionDiscoverer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.NetUtils;
 
 import kafka.cluster.Broker;
@@ -49,6 +50,7 @@ import static org.apache.flink.util.PropertiesUtil.getInt;
  * A partition discoverer that can be used to discover topic and partition metadata
  * from Kafka brokers via the Kafka 0.8 low-level consumer API.
  */
+@Internal
 public class Kafka08PartitionDiscoverer extends AbstractPartitionDiscoverer {
 
 	private static final Logger LOG = LoggerFactory.getLogger(Kafka08PartitionDiscoverer.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
index b5998f4..55672a9 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KillerWatchDog.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * A watch dog thread that forcibly kills another thread, if that thread does not
  * finish in time.
@@ -26,6 +28,7 @@ package org.apache.flink.streaming.connectors.kafka.internals;
  * advisable, this watch dog is only for extreme cases of threads that simply
  * do not terminate otherwise.
  */
+@Internal
 class KillerWatchDog extends Thread {
 
 	private final Thread toKill;
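A generic watchdog sketch of the same idea (not the connector's exact code): wait for the target thread with a timeout, then hard-kill it as a last resort.

    class WatchDogSketch extends Thread {
        private final Thread toKill;
        private final long timeoutMillis;

        WatchDogSketch(Thread toKill, long timeoutMillis) {
            this.toKill = toKill;
            this.timeoutMillis = timeoutMillis;
            setDaemon(true);
        }

        @Override
        public void run() {
            try {
                toKill.join(timeoutMillis); // give the thread a chance to finish
            } catch (InterruptedException ignored) {
                // fall through and check liveness below
            }
            if (toKill.isAlive()) {
                toKill.stop(); // deprecated and unsafe, but the last resort here
            }
        }
    }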

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
index 836ed6b..3762dd5 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PartitionInfoFetcher.java
@@ -18,9 +18,12 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import java.util.List;
 import java.util.Properties;
 
+@Internal
 class PartitionInfoFetcher extends Thread {
 
 	private final List<String> topics;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
index ae4044c..b511653 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/PeriodicOffsetCommitter.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import kafka.common.TopicAndPartition;
 
 import java.util.HashMap;
@@ -29,6 +31,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 /**
  * A thread that periodically writes the current Kafka partition offsets to Zookeeper.
  */
+@Internal
 public class PeriodicOffsetCommitter extends Thread {
 
 	/** The ZooKeeper handler. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
index abc61fa..4c704c3 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/SimpleConsumerThread.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema;
 import org.apache.flink.util.ExceptionUtils;
 
@@ -56,6 +57,7 @@ import static org.apache.flink.util.PropertiesUtil.getInt;
  * @param <T> The type of elements that this consumer thread creates from Kafka's byte messages
  *            and emits into the Flink DataStream.
  */
+@Internal
 class SimpleConsumerThread<T> extends Thread {
 
 	private static final Logger LOG = LoggerFactory.getLogger(SimpleConsumerThread.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
index b6822e2..dd81069 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ZookeeperOffsetHandler.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.configuration.ConfigConstants;
 
 import kafka.utils.ZKGroupTopicDirs;
@@ -36,6 +37,7 @@ import java.util.Properties;
 /**
  * Handler for committing Kafka offsets to Zookeeper and retrieving them again.
  */
+@Internal
 public class ZookeeperOffsetHandler {
 
 	private static final Logger LOG = LoggerFactory.getLogger(ZookeeperOffsetHandler.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
index 79be73c..578a2d2 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer09.java
@@ -65,6 +65,7 @@ import static org.apache.flink.util.PropertiesUtil.getLong;
  * <p>Please refer to Kafka's documentation for the available configuration properties:
  * http://kafka.apache.org/documentation.html#newconsumerconfigs</p>
  */
+@PublicEvolving
 public class FlinkKafkaConsumer09<T> extends FlinkKafkaConsumerBase<T> {
 
 	private static final long serialVersionUID = 2324564345203409112L;

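Since the consumer is now explicitly @PublicEvolving, it is the intended
user-facing entry point. A typical usage sketch (topic name and properties are
placeholders; env is assumed to be a StreamExecutionEnvironment):

    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "my-consumer-group");

    DataStream<String> stream = env.addSource(
        new FlinkKafkaConsumer09<>("my-topic", new SimpleStringSchema(), props));
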
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
index 7f00c92..2d6bcef 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer09.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
@@ -36,6 +37,7 @@ import java.util.Properties;
  *
  * @param <IN> Type of the messages to write into Kafka.
  */
+@PublicEvolving
 public class FlinkKafkaProducer09<IN> extends FlinkKafkaProducerBase<IN> {
 
 	private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
index fb8496a..7edbe65 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -35,6 +36,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.9.
  */
+@PublicEvolving
 public class Kafka09AvroTableSource extends KafkaAvroTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
index a4d2661..95ce4e6 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
@@ -30,6 +31,7 @@ import java.util.Properties;
 /**
  * Kafka 0.9 {@link KafkaTableSink} that serializes data in JSON format.
  */
+@PublicEvolving
 public class Kafka09JsonTableSink extends KafkaJsonTableSink {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
index ded23b0..a7e54be 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,6 +33,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.9.
  */
+@PublicEvolving
 public class Kafka09JsonTableSource extends KafkaJsonTableSource {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
index df15452..31518de 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.table.api.TableSchema;
@@ -29,6 +30,7 @@ import java.util.Properties;
 /**
  * Kafka {@link StreamTableSource} for Kafka 0.9.
  */
+@PublicEvolving
 public abstract class Kafka09TableSource extends KafkaTableSource {
 
 	// The deserialization schema for the Kafka records

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
index 0897f53..02cbd92 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Handover.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.ExceptionUtils;
 
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -45,6 +46,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * the thread has terminated.
  */
 @ThreadSafe
+@Internal
 public final class Handover implements Closeable {
 
 	private final Object lock = new Object();

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
index 82edbec..162cc09 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
@@ -52,6 +53,7 @@ import static org.apache.flink.util.Preconditions.checkState;
  *
  * @param <T> The type of elements produced by the fetcher.
  */
+@Internal
 public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
 
 	private static final Logger LOG = LoggerFactory.getLogger(Kafka09Fetcher.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09PartitionDiscoverer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09PartitionDiscoverer.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09PartitionDiscoverer.java
index bde3d33..5e55995 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09PartitionDiscoverer.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09PartitionDiscoverer.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;
@@ -35,6 +36,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * A partition discoverer that can be used to discover topic and partition metadata
  * from Kafka brokers via the Kafka 0.9 high-level consumer API.
  */
+@Internal
 public class Kafka09PartitionDiscoverer extends AbstractPartitionDiscoverer {
 
 	private final Properties kafkaProperties;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
index c0b9441..b789633 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerCallBridge.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
+
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.TopicPartition;
 
@@ -33,6 +35,7 @@ import java.util.List;
  * Even though the source of subclasses may look identical, the byte code will be different, because they
  * are compiled against different dependencies.
  */
+@Internal
 public class KafkaConsumerCallBridge {
 
 	public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> topicPartitions) throws Exception {

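The "call bridge" indirection exists because KafkaConsumer#assign changed its
parameter type between Kafka client versions. A hedged sketch of a
version-specific override (the subclass name is illustrative):

    public class KafkaConsumerCallBridgeSketch extends KafkaConsumerCallBridge {

        @Override
        public void assignPartitions(
                KafkaConsumer<?, ?> consumer,
                List<TopicPartition> topicPartitions) throws Exception {
            // Source-identical to the base class, but compiled against a
            // Kafka version whose assign(...) takes a Collection rather
            // than a List, so the resulting byte code differs.
            consumer.assign(topicPartitions);
        }
    }
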
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
index fc5f359..022cf99 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/KafkaConsumerThread.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internal;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.connectors.kafka.internals.ClosableBlockingQueue;
@@ -60,6 +61,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * Because Kafka is not maintaining binary compatibility, we use a "call bridge" as an indirection
  * to the KafkaConsumer calls that change signature.
  */
+@Internal
 public class KafkaConsumerThread extends Thread {
 
 	/** Logger for this consumer. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
index 2193d75..660af21 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.state.ListState;
 import org.apache.flink.api.common.state.ListStateDescriptor;
@@ -76,6 +77,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * @param <T> The type of records produced by this data source
  */
+@Internal
 public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFunction<T> implements
 		CheckpointListener,
 		ResultTypeQueryable<T>,

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
index cf07a23..a4437d4 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBase.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.functions.RuntimeContext;
 import org.apache.flink.api.java.ClosureCleaner;
@@ -65,6 +66,7 @@ import static java.util.Objects.requireNonNull;
  *
  * @param <IN> Type of the messages to write into Kafka.
  */
+@Internal
 public abstract class FlinkKafkaProducerBase<IN> extends RichSinkFunction<IN> implements CheckpointedFunction {
 
 	private static final Logger LOG = LoggerFactory.getLogger(FlinkKafkaProducerBase.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
index bf2e9db..1587798 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -46,6 +47,7 @@ import java.util.Properties;
  * <p>The version-specific Kafka consumers need to extend this class and
  * override {@link #createKafkaConsumer(String, Properties, DeserializationSchema)}.
  */
+@Internal
 public abstract class KafkaAvroTableSource extends KafkaTableSource implements DefinedFieldMapping {
 
 	private final Class<? extends SpecificRecordBase> avroRecordClass;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
index 6665dbd..bfc2143 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
@@ -29,6 +30,7 @@ import java.util.Properties;
 /**
  * Base class for {@link KafkaTableSink} that serializes data in JSON format.
  */
+@Internal
 public abstract class KafkaJsonTableSink extends KafkaTableSink {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
index 6806673..7de7f34 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.streaming.util.serialization.JsonRowDeserializationSchema;
@@ -36,6 +37,7 @@ import java.util.Properties;
  *
  * <p>Both the field names and the field types are used to parse the JSON.
  */
+@Internal
 public abstract class KafkaJsonTableSource extends KafkaTableSource implements DefinedFieldMapping {
 
 	private TableSchema jsonSchema;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
index f10d276..687df58 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
@@ -36,6 +37,7 @@ import java.util.Properties;
  * <p>The version-specific Kafka producers need to extend this class and
  * override {@link #createKafkaProducer(String, Properties, SerializationSchema, FlinkKafkaPartitioner)}.
  */
+@Internal
 public abstract class KafkaTableSink implements AppendStreamTableSink<Row> {
 
 	protected final String topic;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
index 385d6ad..d5cda4a 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.streaming.api.datastream.DataStream;
@@ -51,6 +52,7 @@ import scala.Option;
  * <p>The version-specific Kafka consumers need to extend this class and
  * override {@link #createKafkaConsumer(String, Properties, DeserializationSchema)}.
  */
+@Internal
 public abstract class KafkaTableSource
 	implements StreamTableSource<Row>, DefinedProctimeAttribute, DefinedRowtimeAttributes {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
index 0642e7e..85a850b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitMode.java
@@ -17,12 +17,15 @@
 
 package org.apache.flink.streaming.connectors.kafka.config;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * The offset commit mode represents the behaviour of how offsets are externally committed
  * back to Kafka brokers / Zookeeper.
  *
  * <p>The exact value of this is determined at runtime in the consumer subtasks.
  */
+@Internal
 public enum OffsetCommitMode {
 
 	/** Completely disable offset committing. */

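For orientation, the constants of this enum at the time of this commit are
roughly the following (comments paraphrased):

    public enum OffsetCommitMode {
        DISABLED,        // never commit offsets back to Kafka / ZooKeeper
        ON_CHECKPOINTS,  // commit offsets only when a checkpoint completes
        KAFKA_PERIODIC;  // rely on the Kafka client's periodic auto-commit
    }
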
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
index 85dc263..134004e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
@@ -17,9 +17,12 @@
 
 package org.apache.flink.streaming.connectors.kafka.config;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * Utilities for {@link OffsetCommitMode}.
  */
+@Internal
 public class OffsetCommitModes {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
index 81c4138..f984c82 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/StartupMode.java
@@ -17,11 +17,13 @@
 
 package org.apache.flink.streaming.connectors.kafka.config;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionStateSentinel;
 
 /**
  * Startup modes for the Kafka Consumer.
  */
+@Internal
 public enum StartupMode {
 
 	/** Start from committed offsets in ZK / Kafka brokers of a specific consumer group (default). */

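The startup mode is normally selected through the consumer's setter methods
rather than by touching this enum directly, e.g. (assuming consumer is a
FlinkKafkaConsumer09 instance):

    consumer.setStartFromEarliest();        // read every partition from the start
    // consumer.setStartFromLatest();       // read only records arriving from now on
    // consumer.setStartFromGroupOffsets(); // default: resume from committed offsets
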
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
index 6ed2b08..c48a2b5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.Gauge;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
@@ -57,6 +58,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *            the Flink data streams.
  * @param <KPH> The type of topic/partition identifier used by Kafka in the specific version.
  */
+@Internal
 public abstract class AbstractFetcher<T, KPH> {
 
 	protected static final int NO_TIMESTAMPS_WATERMARKS = 0;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscoverer.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscoverer.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscoverer.java
index b336fdc..cca24b7 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscoverer.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscoverer.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -38,6 +40,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * not be concurrently accessed. The only exception to this would be the {@link #wakeup()}
  * call, which allows the discoverer to be interrupted during a {@link #discoverPartitions()} call.
  */
+@Internal
 public abstract class AbstractPartitionDiscoverer {
 
 	/** Describes whether we are discovering partitions for fixed topics or a topic pattern. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
index db32733..826c8de 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -45,6 +47,7 @@ import static java.util.Objects.requireNonNull;
  *
  * @param <E> The type of elements in the queue.
  */
+@Internal
 public class ClosableBlockingQueue<E> {
 
 	/** The lock used to make queue accesses and open checks atomic. */

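The essential idea, as a self-contained sketch (the real class offers far more
operations, e.g. blocking polls and batch retrieval):

    import java.util.ArrayDeque;

    // Queue additions and the open check happen under one lock, so no
    // element can slip in after close() has returned.
    class ClosableQueueSketch<E> {

        private final Object lock = new Object();
        private final ArrayDeque<E> elements = new ArrayDeque<>();
        private boolean open = true;

        boolean addIfOpen(E element) {
            synchronized (lock) {
                if (open) {
                    elements.addLast(element);
                }
                return open;
            }
        }

        void close() {
            synchronized (lock) {
                open = false;
            }
        }
    }
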
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
index 06cdf2c..3518f2a 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ExceptionProxy.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import javax.annotation.Nullable;
 
 import java.util.concurrent.atomic.AtomicReference;
@@ -66,6 +68,7 @@ import java.util.concurrent.atomic.AtomicReference;
  * }
  * </pre>
  */
+@Internal
 public class ExceptionProxy {
 
 	/** The thread that should be interrupted when an exception occurs. */

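The pattern behind this class, in a minimal hedged sketch (names are modeled
on the original, but treat the details as illustrative): a worker thread
records its failure and interrupts the waiting thread, which then rethrows.

    import java.util.concurrent.atomic.AtomicReference;

    class ExceptionProxySketch {

        private final Thread toInterrupt;
        private final AtomicReference<Throwable> error = new AtomicReference<>();

        ExceptionProxySketch(Thread toInterrupt) {
            this.toInterrupt = toInterrupt;
        }

        void reportError(Throwable t) {
            // only the first reported error wins; wake the waiting thread
            if (error.compareAndSet(null, t) && toInterrupt != null) {
                toInterrupt.interrupt();
            }
        }

        void checkAndThrowException() throws Exception {
            Throwable t = error.get();
            if (t instanceof Exception) {
                throw (Exception) t;
            } else if (t != null) {
                throw new Exception(t);
            }
        }
    }
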
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaCommitCallback.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaCommitCallback.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaCommitCallback.java
index aca7ae5..04ed1e9 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaCommitCallback.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaCommitCallback.java
@@ -18,10 +18,13 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * A callback interface that the source operator can implement to trigger custom actions when a commit request completes,
  * which should normally be triggered from the checkpoint complete event.
  */
+@Internal
 public interface KafkaCommitCallback {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
index d35d585..85eb2e5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartition.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
@@ -31,6 +33,7 @@ import static java.util.Objects.requireNonNull;
  * <p>Note: This class must not change in its structure, because it would change the
  * serialization format and make previous savepoints unreadable.
  */
+@PublicEvolving
 public final class KafkaTopicPartition implements Serializable {
 
 	/** THIS SERIAL VERSION UID MUST NOT CHANGE, BECAUSE IT WOULD BREAK

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionAssigner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionAssigner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionAssigner.java
index 944630f..120677e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionAssigner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionAssigner.java
@@ -17,9 +17,12 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * Utility for assigning Kafka partitions to consumer subtasks.
  */
+@Internal
 public class KafkaTopicPartitionAssigner {
 
 	/**

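A hedged sketch of the kind of deterministic assignment such a utility
performs (the derivation of the start index from the topic name is omitted):

    // Partitions of a topic are spread round-robin over the consumer
    // subtasks, offset by a per-topic start index so that the partitions
    // of many small topics do not all land on subtask 0.
    static int assignSketch(int startIndex, int partition, int numParallelSubtasks) {
        return (startIndex + partition) % numParallelSubtasks;
    }
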
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionLeader.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionLeader.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionLeader.java
index 1959a05..d08f7dc 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionLeader.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionLeader.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import org.apache.kafka.common.Node;
 
 import java.io.Serializable;
@@ -25,6 +27,7 @@ import java.io.Serializable;
  * Serializable Topic Partition info with leader Node information.
  * This class is used at runtime.
  */
+@Internal
 public class KafkaTopicPartitionLeader implements Serializable {
 
 	private static final long serialVersionUID = 9145855900303748582L;


[19/19] flink git commit: [hotfix] [kafka] Add missing serialVersionUIDs to all Kafka connector Serializable classes

Posted by tz...@apache.org.
[hotfix] [kafka] Add missing serialVersionUIDs to all Kafka connector Serializable classes


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4ade8263
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4ade8263
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4ade8263

Branch: refs/heads/master
Commit: 4ade82631b43c28262c303c4ba028270652f4db3
Parents: 4ceabed
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 17:33:12 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:29 2018 +0800

----------------------------------------------------------------------
 .../flink/streaming/connectors/kafka/FlinkKafka011Exception.java  | 2 ++
 .../connectors/kafka/partitioner/FlinkFixedPartitioner.java       | 2 ++
 .../streaming/util/serialization/JSONDeserializationSchema.java   | 3 +++
 .../util/serialization/JSONKeyValueDeserializationSchema.java     | 3 +++
 .../util/serialization/JsonRowDeserializationSchema.java          | 2 ++
 .../streaming/util/serialization/JsonRowSerializationSchema.java  | 3 +++
 6 files changed, 15 insertions(+)
----------------------------------------------------------------------

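Why this hotfix matters: when a Serializable class declares no
serialVersionUID, the JVM derives one from the class structure, so even a
harmless-looking change to the class invalidates previously serialized
instances, e.g. state stored in savepoints. A minimal sketch of the fix being
applied throughout this commit:

    import java.io.Serializable;

    public class StableStateSketch implements Serializable {

        // Pinning the UID keeps old serialized instances readable even
        // when compiler-derived details of the class change.
        private static final long serialVersionUID = 1L;

        private String topic;
        private int partition;
    }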

http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
index 6189d8a..61b0ff6 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafka011Exception.java
@@ -26,6 +26,8 @@ import org.apache.flink.util.FlinkException;
 @PublicEvolving
 public class FlinkKafka011Exception extends FlinkException {
 
+	private static final long serialVersionUID = 920269130311214200L;
+
 	private final FlinkKafka011ErrorCode errorCode;
 
 	public FlinkKafka011Exception(FlinkKafka011ErrorCode errorCode, String message) {

http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
index 906238d..6e83ddd 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
@@ -54,6 +54,8 @@ import org.apache.flink.util.Preconditions;
 @PublicEvolving
 public class FlinkFixedPartitioner<T> extends FlinkKafkaPartitioner<T> {
 
+	private static final long serialVersionUID = -3785320239953858777L;
+
 	private int parallelInstanceId;
 
 	@Override

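The core of the fixed partitioner, as a hedged standalone sketch (the actual
method signature in FlinkKafkaPartitioner also carries the record, key, value
and target topic):

    // Each parallel sink instance sticks to exactly one Kafka partition,
    // wrapping around if there are more sink instances than partitions.
    int partitionSketch(int parallelInstanceId, int[] partitions) {
        return partitions[parallelInstanceId % partitions.length];
    }
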
http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
index 8c572c2..900c094 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
@@ -32,6 +32,9 @@ import java.io.IOException;
  */
 @PublicEvolving
 public class JSONDeserializationSchema extends AbstractDeserializationSchema<ObjectNode> {
+
+	private static final long serialVersionUID = -1699854177598621044L;
+
 	private ObjectMapper mapper;
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
index 0168eb7..caffcec 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
@@ -40,6 +40,9 @@ import static org.apache.flink.api.java.typeutils.TypeExtractor.getForClass;
  */
 @PublicEvolving
 public class JSONKeyValueDeserializationSchema implements KeyedDeserializationSchema<ObjectNode> {
+
+	private static final long serialVersionUID = 1509391548173891955L;
+
 	private final boolean includeMetadata;
 	private ObjectMapper mapper;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
index 1f4a60e..21215cd 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
@@ -41,6 +41,8 @@ import java.io.IOException;
 @PublicEvolving
 public class JsonRowDeserializationSchema implements DeserializationSchema<Row> {
 
+	private static final long serialVersionUID = -228294330688809195L;
+
 	/** Type information describing the result type. */
 	private final TypeInformation<Row> typeInfo;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ade8263/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
index 3e72506..44ce7ea 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
@@ -40,6 +40,9 @@ import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.Obje
  */
 @PublicEvolving
 public class JsonRowSerializationSchema implements SerializationSchema<Row> {
+
+	private static final long serialVersionUID = -2885556750743978636L;
+
 	/** Fields names in the input Row object. */
 	private final String[] fieldNames;
 	/** Object mapper that is used to create output JSON objects. */


[17/19] flink git commit: [FLINK-8276] [kafka] Properly annotate APIs for Kafka connector

Posted by tz...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
index 78ab612..983d99c 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionState.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * The state that the Flink Kafka Consumer holds for each Kafka partition.
  * Includes the Kafka descriptor for partitions.
@@ -27,6 +29,7 @@ package org.apache.flink.streaming.connectors.kafka.internals;
  *
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions.
  */
+@Internal
 public class KafkaTopicPartitionState<KPH> {
 
 	// ------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
index 3857991..68f842a 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateSentinel.java
@@ -17,12 +17,15 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * Magic values used to represent special offset states before partitions are actually read.
  *
  * <p>The values are all negative. Negative offsets are not used by Kafka (invalid), so we
  * pick a number that is probably (hopefully) not used by Kafka as a magic number for anything else.
  */
+@Internal
 public class KafkaTopicPartitionStateSentinel {
 
 	/** Magic number that defines an unset offset. */

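A hedged sketch of the sentinel idea (the constant's value is a placeholder;
Flink's actual magic numbers differ):

    public class OffsetSentinelSketch {

        // All sentinels are negative: Kafka never uses negative offsets,
        // so a real offset can never collide with a marker value.
        public static final long OFFSET_NOT_SET = -1_000_000_001L; // placeholder

        public static boolean isSentinel(long offset) {
            return offset < 0;
        }
    }
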
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
index 5116e9f..015ac71 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPeriodicWatermarks.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
 import org.apache.flink.streaming.api.watermark.Watermark;
 
@@ -27,6 +28,7 @@ import org.apache.flink.streaming.api.watermark.Watermark;
  * @param <T> The type of records handled by the watermark generator
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions.
  */
+@Internal
 public final class KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> extends KafkaTopicPartitionState<KPH> {
 
 	/** The timestamp assigner and watermark generator for the partition. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
index f4a80a4..aedddf3 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionStateWithPunctuatedWatermarks.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks;
 import org.apache.flink.streaming.api.watermark.Watermark;
 
@@ -32,6 +33,7 @@ import javax.annotation.Nullable;
  * @param <T> The type of records handled by the watermark generator
  * @param <KPH> The type of the Kafka partition descriptor, which varies across Kafka versions
  */
+@Internal
 public final class KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> extends KafkaTopicPartitionState<KPH> {
 
 	/** The timestamp assigner and watermark generator for the partition. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptor.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptor.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptor.java
index 9a81ea8..ddea63b 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptor.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptor.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals;
 
+import org.apache.flink.annotation.Internal;
+
 import javax.annotation.Nullable;
 
 import java.io.Serializable;
@@ -30,6 +32,7 @@ import static org.apache.flink.util.Preconditions.checkArgument;
  * A Kafka Topics Descriptor describes how the consumer subscribes to Kafka topics -
  * either a fixed list of topics, or a topic pattern.
  */
+@Internal
 public class KafkaTopicsDescriptor implements Serializable {
 
 	private static final long serialVersionUID = -3807227764764900975L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaMetricWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaMetricWrapper.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaMetricWrapper.java
index cedb696..4a5fb9d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaMetricWrapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaMetricWrapper.java
@@ -18,11 +18,13 @@
 
 package org.apache.flink.streaming.connectors.kafka.internals.metrics;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.Gauge;
 
 /**
  * Gauge for getting the current value of a Kafka metric.
  */
+@Internal
 public class KafkaMetricWrapper implements Gauge<Double> {
 	private final org.apache.kafka.common.Metric kafkaMetric;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
index 6ed3717..906238d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.util.Preconditions;
 
 /**
@@ -50,6 +51,7 @@ import org.apache.flink.util.Preconditions;
  * To avoid such an unbalanced partitioning, use a round-robin kafka partitioner (note that this will
  * cause a lot of network connections between all the Flink instances and all the Kafka brokers).
  */
+@PublicEvolving
 public class FlinkFixedPartitioner<T> extends FlinkKafkaPartitioner<T> {
 
 	private int parallelInstanceId;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
index 168e76b..5a42dc6 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaDelegatePartitioner.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
+import org.apache.flink.annotation.Internal;
+
 /**
  * Delegate for the deprecated {@link KafkaPartitioner}.
  * This should only be used for bridging deprecated partitioning API methods.
@@ -25,6 +27,7 @@ package org.apache.flink.streaming.connectors.kafka.partitioner;
  * @deprecated Delegate for {@link KafkaPartitioner}, use {@link FlinkKafkaPartitioner} instead
  */
 @Deprecated
+@Internal
 public class FlinkKafkaDelegatePartitioner<T> extends FlinkKafkaPartitioner<T> {
 	private final KafkaPartitioner<T> kafkaPartitioner;
 	private int[] partitions;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaPartitioner.java
index b634af7..d1df6d9 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkKafkaPartitioner.java
@@ -17,12 +17,15 @@
 
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 
 /**
  * A {@link FlinkKafkaPartitioner} wraps logic on how to partition records
  * across partitions of multiple Kafka topics.
  */
+@PublicEvolving
 public abstract class FlinkKafkaPartitioner<T> implements Serializable {
 
 	private static final long serialVersionUID = -9086719227828020494L;

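The FlinkFixedPartitioner Javadoc above points to a round-robin partitioner as the way to avoid unbalanced partitioning. A hedged sketch of one, assuming the 1.4-era signature int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions); the class name is invented:

import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

public class RoundRobinKafkaPartitioner<T> extends FlinkKafkaPartitioner<T> {

	private static final long serialVersionUID = 1L;

	private int nextPartition;

	@Override
	public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
		// cycle through all target partitions; this balances load at the cost
		// of connections from every Flink instance to every Kafka broker
		int chosen = partitions[nextPartition % partitions.length];
		nextPartition = (nextPartition + 1) % partitions.length;
		return chosen;
	}
}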
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/KafkaPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/KafkaPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/KafkaPartitioner.java
index eebc619..a2cd4a6 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/KafkaPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/KafkaPartitioner.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.connectors.kafka.partitioner;
 
+import org.apache.flink.annotation.Internal;
+
 import java.io.Serializable;
 
 /**
@@ -27,6 +29,7 @@ import java.io.Serializable;
  *             multiple topics, and has been deprecated. Please use {@link FlinkKafkaPartitioner} instead.
  */
 @Deprecated
+@Internal
 public abstract class KafkaPartitioner<T> implements Serializable {
 
 	private static final long serialVersionUID = -1974260817778593473L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
index f60a0b7..8c572c2 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONDeserializationSchema.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.AbstractDeserializationSchema;
 
 import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
@@ -29,6 +30,7 @@ import java.io.IOException;
  *
  * <p>Fields can be accessed by calling objectNode.get(&lt;name>).as(&lt;type>)
  */
+@PublicEvolving
 public class JSONDeserializationSchema extends AbstractDeserializationSchema<ObjectNode> {
 	private ObjectMapper mapper;
 

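As a usage sketch for the field-access pattern in the Javadoc above (Kafka wiring omitted; the JSON payload is invented):

import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.streaming.util.serialization.JSONDeserializationSchema;

import java.nio.charset.StandardCharsets;

public class JsonSchemaExample {

	public static void main(String[] args) throws Exception {
		JSONDeserializationSchema schema = new JSONDeserializationSchema();
		byte[] message = "{\"user\":\"alice\",\"count\":3}".getBytes(StandardCharsets.UTF_8);

		ObjectNode node = schema.deserialize(message);

		// objectNode.get(<name>).as(<type>), as described above
		System.out.println(node.get("user").asText() + " / " + node.get("count").asInt());
	}
}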
http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
index 30f0fd5..0168eb7 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JSONKeyValueDeserializationSchema.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 
 import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
@@ -37,6 +38,7 @@ import static org.apache.flink.api.java.typeutils.TypeExtractor.getForClass;
  * <p>Metadata fields can be accessed by calling objectNode.get("metadata").get(&lt;name>).as(&lt;type>) and include
  * the "offset" (long), "topic" (String) and "partition" (int).
  */
+@PublicEvolving
 public class JSONKeyValueDeserializationSchema implements KeyedDeserializationSchema<ObjectNode> {
 	private final boolean includeMetadata;
 	private ObjectMapper mapper;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
index 100f960..1f4a60e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowDeserializationSchema.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
@@ -37,6 +38,7 @@ import java.io.IOException;
  *
  * <p>Failure during deserialization are forwarded as wrapped IOExceptions.
  */
+@PublicEvolving
 public class JsonRowDeserializationSchema implements DeserializationSchema<Row> {
 
 	/** Type information describing the result type. */

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
index 36d3137..3e72506 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/JsonRowSerializationSchema.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.CompositeType;
@@ -37,6 +38,7 @@ import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.Obje
  * <p>Result <code>byte[]</code> messages can be deserialized using
  * {@link JsonRowDeserializationSchema}.
  */
+@PublicEvolving
 public class JsonRowSerializationSchema implements SerializationSchema<Row> {
 	/** Fields names in the input Row object. */
 	private final String[] fieldNames;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
index 234a96d..0ef6fd5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchema.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 
 import java.io.IOException;
@@ -29,6 +30,7 @@ import java.io.Serializable;
  *
  * @param <T> The type created by the keyed deserialization schema.
  */
+@PublicEvolving
 public interface KeyedDeserializationSchema<T> extends Serializable, ResultTypeQueryable<T> {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
index 93b4f68..06289e5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedDeserializationSchemaWrapper.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 
@@ -27,6 +28,7 @@ import java.io.IOException;
  * interface.
  * @param <T> The type created by the deserialization schema.
  */
+@Internal
 public class KeyedDeserializationSchemaWrapper<T> implements KeyedDeserializationSchema<T> {
 
 	private static final long serialVersionUID = 2651665280744549932L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
index 12bcab9..2f610c2 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchema.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 
 /**
@@ -26,6 +28,7 @@ import java.io.Serializable;
  *
  * @param <T> The type to be serialized.
  */
+@PublicEvolving
 public interface KeyedSerializationSchema<T> extends Serializable {
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
index 70ae897..013ea62 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/KeyedSerializationSchemaWrapper.java
@@ -17,6 +17,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 
 /**
@@ -24,6 +25,7 @@ import org.apache.flink.api.common.serialization.SerializationSchema;
  * interface.
  * @param <T> The type to serialize
  */
+@Internal
 public class KeyedSerializationSchemaWrapper<T> implements KeyedSerializationSchema<T> {
 
 	private static final long serialVersionUID = 1351665280744549933L;

http://git-wip-us.apache.org/repos/asf/flink/blob/4ceabed9/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
index 96b8879..3be5779 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/TypeInformationKeyValueSerializationSchema.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.util.serialization;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
@@ -36,6 +37,7 @@ import java.io.IOException;
  * @param <K> The key type to be serialized.
  * @param <V> The value type to be serialized.
  */
+@Internal
 public class TypeInformationKeyValueSerializationSchema<K, V> implements KeyedDeserializationSchema<Tuple2<K, V>>, KeyedSerializationSchema<Tuple2<K, V>> {
 
 	private static final long serialVersionUID = -5359448468131559102L;


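Taken together, the hunks above encode each class's stability contract. A self-contained illustration of the convention (class names invented; only the annotations are Flink's):

import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.PublicEvolving;

/** Users may depend on this, but it can still evolve between minor releases. */
@PublicEvolving
abstract class ExampleSchema implements java.io.Serializable {

	private static final long serialVersionUID = 1L;

	abstract byte[] serialize(String element);
}

/** Pure implementation detail; may change or disappear without notice. */
@Internal
final class ExampleSchemaWrapper extends ExampleSchema {

	private static final long serialVersionUID = 2L;

	@Override
	byte[] serialize(String element) {
		return element.getBytes(java.nio.charset.StandardCharsets.UTF_8);
	}
}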
[14/19] flink git commit: [hotfix] [kinesis] Add serialVersionUID to KinesisPartitioner

Posted by tz...@apache.org.
[hotfix] [kinesis] Add serialVersionUID to KinesisPartitioner


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/5a318de9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/5a318de9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/5a318de9

Branch: refs/heads/master
Commit: 5a318de97734df52dcc641c0594ead9df18b5e2f
Parents: 30734d5
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 16:57:38 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../flink/streaming/connectors/kinesis/KinesisPartitioner.java     | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/5a318de9/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
index 6082346..70528dc 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
@@ -29,6 +29,8 @@ import java.io.Serializable;
 @PublicEvolving
 public abstract class KinesisPartitioner<T> implements Serializable {
 
+	private static final long serialVersionUID = -7467294664702189780L;
+
 	/**
 	 * Return a partition id based on the input.
 	 * @param element Element to partition

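Without an explicit serialVersionUID, the JVM derives one from the class structure, so even an unrelated edit can invalidate previously serialized partitioner instances (for example, ones captured in savepoints). A generic sketch of the pattern the hotfix applies (class name hypothetical):

import java.io.Serializable;

public abstract class StablePartitioner<T> implements Serializable {

	// pinned so that recompilation or harmless refactorings do not change
	// the serialized form that subclasses inherit
	private static final long serialVersionUID = 1L;

	public abstract String getPartitionId(T element);
}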

[13/19] flink git commit: [hotfix] [doc] Fixed doc typo in DataStream API

Posted by tz...@apache.org.
[hotfix] [doc] Fixed doc typo in DataStream API

This closes #5283.
This closes #5191.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4496248a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4496248a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4496248a

Branch: refs/heads/master
Commit: 4496248ace3e10808aa56a2c1f91a7cb59b38423
Parents: 77e63e6
Author: Alejandro Alcalde <al...@gmail.com>
Authored: Thu Jan 11 13:49:29 2018 +0100
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 docs/dev/datastream_api.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/4496248a/docs/dev/datastream_api.md
----------------------------------------------------------------------
diff --git a/docs/dev/datastream_api.md b/docs/dev/datastream_api.md
index 09580b1..d7ab2e7 100644
--- a/docs/dev/datastream_api.md
+++ b/docs/dev/datastream_api.md
@@ -161,7 +161,7 @@ File-based:
 
     *IMPLEMENTATION:*
 
-    Under the hood, Flink splits the file reading process into two sub-tasks, namely *directory monitoring* and *data reading*. Each of these sub-tasks is implemented by a separate entity. Monitoring is implemented by a single, **non-parallel** (parallelism = 1) task, while reading is performed by multiple tasks running in parallel. The parallelism of the latter is equal to the job parallelism. The role of the single monitoring task is to scan the directory (periodically or only once depending on the `watchType`), find the files to be processed, divide them in *splits*, and assign these splits to the downstream readers. The readers are the ones who will read the actual data. Each split is read by only one reader, while a reader can read muplitple splits, one-by-one.
+    Under the hood, Flink splits the file reading process into two sub-tasks, namely *directory monitoring* and *data reading*. Each of these sub-tasks is implemented by a separate entity. Monitoring is implemented by a single, **non-parallel** (parallelism = 1) task, while reading is performed by multiple tasks running in parallel. The parallelism of the latter is equal to the job parallelism. The role of the single monitoring task is to scan the directory (periodically or only once depending on the `watchType`), find the files to be processed, divide them in *splits*, and assign these splits to the downstream readers. The readers are the ones who will read the actual data. Each split is read by only one reader, while a reader can read multiple splits, one-by-one.
 
     *IMPORTANT NOTES:*
 
@@ -219,7 +219,7 @@ File-based:
 
     *IMPLEMENTATION:*
 
-    Under the hood, Flink splits the file reading process into two sub-tasks, namely *directory monitoring* and *data reading*. Each of these sub-tasks is implemented by a separate entity. Monitoring is implemented by a single, **non-parallel** (parallelism = 1) task, while reading is performed by multiple tasks running in parallel. The parallelism of the latter is equal to the job parallelism. The role of the single monitoring task is to scan the directory (periodically or only once depending on the `watchType`), find the files to be processed, divide them in *splits*, and assign these splits to the downstream readers. The readers are the ones who will read the actual data. Each split is read by only one reader, while a reader can read muplitple splits, one-by-one.
+    Under the hood, Flink splits the file reading process into two sub-tasks, namely *directory monitoring* and *data reading*. Each of these sub-tasks is implemented by a separate entity. Monitoring is implemented by a single, **non-parallel** (parallelism = 1) task, while reading is performed by multiple tasks running in parallel. The parallelism of the latter is equal to the job parallelism. The role of the single monitoring task is to scan the directory (periodically or only once depending on the `watchType`), find the files to be processed, divide them in *splits*, and assign these splits to the downstream readers. The readers are the ones who will read the actual data. Each split is read by only one reader, while a reader can read multiple splits, one-by-one.
 
     *IMPORTANT NOTES:*
 
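For reference, the monitoring/reading split described in the corrected passage is what backs readFile. A rough usage sketch against the 1.4-era DataStream API (the path and interval are placeholders):

import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.FileProcessingMode;

public class ReadFileExample {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		String path = "file:///tmp/input";
		TextInputFormat format = new TextInputFormat(new Path(path));

		// one non-parallel task monitors the directory and assigns splits;
		// parallel readers consume them, each split read by exactly one reader
		DataStream<String> lines = env.readFile(
			format, path, FileProcessingMode.PROCESS_CONTINUOUSLY, 10_000L);

		lines.print();
		env.execute("read-file-example");
	}
}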


[09/19] flink git commit: [FLINK-8324] [kafka, metrics] Make clear which metrics are legacy and should not be touched

Posted by tz...@apache.org.
[FLINK-8324] [kafka, metrics] Make clear which metrics are legacy and should not be touched

This commit consolidates all metrics-related constant strings in a
KafkaConsumerMetricConstants class, to give a better overview in the
code of which metrics are exported.

This makes it clearer which metrics are kept for compatibility reasons.
It also documents that the metric names must not be changed, since that
would break metrics compatibility.

This closes #5214.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/82a9ae59
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/82a9ae59
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/82a9ae59

Branch: refs/heads/master
Commit: 82a9ae596e185a3fd0b6bc9ee59d3a3a8022960a
Parents: 6f6b3c8
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 11:31:02 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kafka/internals/Kafka08Fetcher.java         |  3 +-
 .../kafka/internal/Kafka09Fetcher.java          |  3 +-
 .../kafka/FlinkKafkaConsumerBase.java           |  6 ++-
 .../kafka/internals/AbstractFetcher.java        | 30 +++++++----
 .../metrics/KafkaConsumerMetricConstants.java   | 56 ++++++++++++++++++++
 5 files changed, 84 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/82a9ae59/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
index 8bcd663..bd62bb8 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/Kafka08Fetcher.java
@@ -47,6 +47,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.KAFKA_CONSUMER_METRICS_GROUP;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
@@ -177,7 +178,7 @@ public class Kafka08Fetcher<T> extends AbstractFetcher<T, TopicAndPartition> {
 
 			// register offset metrics
 			if (useMetrics) {
-				final MetricGroup kafkaMetricGroup = runtimeContext.getMetricGroup().addGroup("KafkaConsumer");
+				final MetricGroup kafkaMetricGroup = runtimeContext.getMetricGroup().addGroup(KAFKA_CONSUMER_METRICS_GROUP);
 				addOffsetStateGauge(kafkaMetricGroup);
 			}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/82a9ae59/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
index 51f69cd..82edbec 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/Kafka09Fetcher.java
@@ -44,6 +44,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.KAFKA_CONSUMER_METRICS_GROUP;
 import static org.apache.flink.util.Preconditions.checkState;
 
 /**
@@ -98,7 +99,7 @@ public class Kafka09Fetcher<T> extends AbstractFetcher<T, TopicPartition> {
 		this.deserializer = deserializer;
 		this.handover = new Handover();
 
-		final MetricGroup kafkaMetricGroup = metricGroup.addGroup("KafkaConsumer");
+		final MetricGroup kafkaMetricGroup = metricGroup.addGroup(KAFKA_CONSUMER_METRICS_GROUP);
 		addOffsetStateGauge(kafkaMetricGroup);
 
 		this.consumerThread = new KafkaConsumerThread(

http://git-wip-us.apache.org/repos/asf/flink/blob/82a9ae59/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
index 865d66c..c350442 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBase.java
@@ -62,6 +62,8 @@ import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
 
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.COMMITS_FAILED_METRICS_COUNTER;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.COMMITS_SUCCEEDED_METRICS_COUNTER;
 import static org.apache.flink.util.Preconditions.checkArgument;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
@@ -522,8 +524,8 @@ public abstract class FlinkKafkaConsumerBase<T> extends RichParallelSourceFuncti
 		}
 
 		// initialize commit metrics and default offset callback method
-		this.successfulCommits = this.getRuntimeContext().getMetricGroup().counter("commitsSucceeded");
-		this.failedCommits =  this.getRuntimeContext().getMetricGroup().counter("commitsFailed");
+		this.successfulCommits = this.getRuntimeContext().getMetricGroup().counter(COMMITS_SUCCEEDED_METRICS_COUNTER);
+		this.failedCommits =  this.getRuntimeContext().getMetricGroup().counter(COMMITS_FAILED_METRICS_COUNTER);
 
 		this.offsetCommitCallback = new KafkaCommitCallback() {
 			@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/82a9ae59/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
index 258e3dc..6ed2b08 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcher.java
@@ -38,6 +38,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.COMMITTED_OFFSETS_METRICS_GAUGE;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.CURRENT_OFFSETS_METRICS_GAUGE;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.LEGACY_COMMITTED_OFFSETS_METRICS_GROUP;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.LEGACY_CURRENT_OFFSETS_METRICS_GROUP;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.OFFSETS_BY_PARTITION_METRICS_GROUP;
+import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.OFFSETS_BY_TOPIC_METRICS_GROUP;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
@@ -564,21 +570,25 @@ public abstract class AbstractFetcher<T, KPH> {
 	 * @param metricGroup The metric group to use
 	 */
 	protected void addOffsetStateGauge(MetricGroup metricGroup) {
-		// add current offsets to gage
-		MetricGroup currentOffsets = metricGroup.addGroup("current-offsets");
-		MetricGroup committedOffsets = metricGroup.addGroup("committed-offsets");
-		for (KafkaTopicPartitionState<KPH> ktp : subscribedPartitionStates) {
-			currentOffsets.gauge(ktp.getTopic() + "-" + ktp.getPartition(), new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
-			committedOffsets.gauge(ktp.getTopic() + "-" + ktp.getPartition(), new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
+		MetricGroup legacyCurrentOffsetsGroup = metricGroup.addGroup(LEGACY_CURRENT_OFFSETS_METRICS_GROUP);
+		MetricGroup legacyCommittedOffsetsGroup = metricGroup.addGroup(LEGACY_COMMITTED_OFFSETS_METRICS_GROUP);
 
+		for (KafkaTopicPartitionState<KPH> ktp : subscribedPartitionStates) {
 			MetricGroup topicPartitionGroup = metricGroup
-				.addGroup("topic", ktp.getTopic())
-				.addGroup("partition", Integer.toString(ktp.getPartition()));
-			topicPartitionGroup.gauge("currentOffsets", new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
-			topicPartitionGroup.gauge("committedOffsets", new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
+				.addGroup(OFFSETS_BY_TOPIC_METRICS_GROUP, ktp.getTopic())
+				.addGroup(OFFSETS_BY_PARTITION_METRICS_GROUP, Integer.toString(ktp.getPartition()));
+			topicPartitionGroup.gauge(CURRENT_OFFSETS_METRICS_GAUGE, new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
+			topicPartitionGroup.gauge(COMMITTED_OFFSETS_METRICS_GAUGE, new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
+
+			legacyCurrentOffsetsGroup.gauge(getLegacyOffsetsMetricsGaugeName(ktp), new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET));
+			legacyCommittedOffsetsGroup.gauge(getLegacyOffsetsMetricsGaugeName(ktp), new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET));
 		}
 	}
 
+	private static String getLegacyOffsetsMetricsGaugeName(KafkaTopicPartitionState<?> ktp) {
+		return ktp.getTopic() + "-" + ktp.getPartition();
+	}
+
 	/**
 	 * Gauge types.
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/82a9ae59/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaConsumerMetricConstants.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaConsumerMetricConstants.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaConsumerMetricConstants.java
new file mode 100644
index 0000000..fcd7d6a
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/metrics/KafkaConsumerMetricConstants.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka.internals.metrics;
+
+import org.apache.flink.annotation.Internal;
+
+/**
+ * A collection of Kafka consumer metrics related constant strings.
+ *
+ * <p>The names must not be changed, as that would break backward compatibility for the consumer's metrics.
+ */
+@Internal
+public class KafkaConsumerMetricConstants {
+
+	public static final String KAFKA_CONSUMER_METRICS_GROUP = "KafkaConsumer";
+
+	// ------------------------------------------------------------------------
+	//  Per-subtask metrics
+	// ------------------------------------------------------------------------
+
+	public static final String COMMITS_SUCCEEDED_METRICS_COUNTER = "commitsSucceeded";
+	public static final String COMMITS_FAILED_METRICS_COUNTER = "commitsFailed";
+
+	// ------------------------------------------------------------------------
+	//  Per-partition metrics
+	// ------------------------------------------------------------------------
+
+	public static final String OFFSETS_BY_TOPIC_METRICS_GROUP = "topic";
+	public static final String OFFSETS_BY_PARTITION_METRICS_GROUP = "partition";
+
+	public static final String CURRENT_OFFSETS_METRICS_GAUGE = "currentOffsets";
+	public static final String COMMITTED_OFFSETS_METRICS_GAUGE = "committedOffsets";
+
+	// ------------------------------------------------------------------------
+	//  Legacy metrics
+	// ------------------------------------------------------------------------
+
+	public static final String LEGACY_CURRENT_OFFSETS_METRICS_GROUP = "current-offsets";
+	public static final String LEGACY_COMMITTED_OFFSETS_METRICS_GROUP = "committed-offsets";
+
+}

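A condensed sketch of how the consolidated constants are meant to be consumed when registering the per-partition gauges (the helper class is invented; the constants and MetricGroup calls come from the hunks above):

import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;

import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.CURRENT_OFFSETS_METRICS_GAUGE;
import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.OFFSETS_BY_PARTITION_METRICS_GROUP;
import static org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaConsumerMetricConstants.OFFSETS_BY_TOPIC_METRICS_GROUP;

final class OffsetGaugeRegistration {

	/** Registers a current-offset gauge for one partition under the consolidated names. */
	static void register(MetricGroup consumerGroup, String topic, int partition, long[] currentOffset) {
		consumerGroup
			.addGroup(OFFSETS_BY_TOPIC_METRICS_GROUP, topic)
			.addGroup(OFFSETS_BY_PARTITION_METRICS_GROUP, Integer.toString(partition))
			.gauge(CURRENT_OFFSETS_METRICS_GAUGE, (Gauge<Long>) () -> currentOffset[0]);
	}

	private OffsetGaugeRegistration() {}
}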

[11/19] flink git commit: [FLINK-8162] [kinesis] Add unit test for Kinesis shard metrics reporting

Posted by tz...@apache.org.
[FLINK-8162] [kinesis] Add unit test for Kinesis shard metrics reporting

This closes #5182.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8e23264a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8e23264a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8e23264a

Branch: refs/heads/master
Commit: 8e23264a4511d33723a756abc209c289fafbe97d
Parents: 03841fd
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 14:34:20 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 .../kinesis/internals/ShardConsumerTest.java    | 71 +++++++++++++++-----
 .../testutils/FakeKinesisBehavioursFactory.java | 41 +++++++----
 2 files changed, 82 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/8e23264a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index e8c5902..7ab3735 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -39,6 +39,7 @@ import java.util.LinkedList;
 import java.util.Properties;
 import java.util.concurrent.atomic.AtomicReference;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -47,15 +48,44 @@ import static org.junit.Assert.assertTrue;
 public class ShardConsumerTest {
 
 	@Test
+	public void testMetricsReporting() {
+		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
+
+		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
+		subscribedShardsStateUnderTest.add(
+			new KinesisStreamShardState(
+				KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
+				fakeToBeConsumedShard,
+				new SequenceNumber("fakeStartingState")));
+
+		TestableKinesisDataFetcher fetcher =
+			new TestableKinesisDataFetcher(
+				Collections.singletonList("fakeStream"),
+				new Properties(),
+				10,
+				2,
+				new AtomicReference<Throwable>(),
+				subscribedShardsStateUnderTest,
+				KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
+				Mockito.mock(KinesisProxyInterface.class));
+
+		ShardMetricsReporter shardMetricsReporter = new ShardMetricsReporter();
+		long millisBehindLatest = 500L;
+		new ShardConsumer<>(
+			fetcher,
+			0,
+			subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
+			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, millisBehindLatest),
+			shardMetricsReporter).run();
+
+		// the millisBehindLatest metric should have been reported
+		assertEquals(millisBehindLatest, shardMetricsReporter.getMillisBehindLatest());
+	}
+
+	@Test
 	public void testCorrectNumOfCollectedRecordsAndUpdatedState() {
-		StreamShardHandle fakeToBeConsumedShard = new StreamShardHandle(
-			"fakeStream",
-			new Shard()
-				.withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))
-				.withHashKeyRange(
-					new HashKeyRange()
-						.withStartingHashKey("0")
-						.withEndingHashKey(new BigInteger(StringUtils.repeat("FF", 16), 16).toString())));
+		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
 
 		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
 		subscribedShardsStateUnderTest.add(
@@ -78,7 +108,7 @@ public class ShardConsumerTest {
 			0,
 			subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9),
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, 500L),
 			new ShardMetricsReporter()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
@@ -88,14 +118,7 @@ public class ShardConsumerTest {
 
 	@Test
 	public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithUnexpectedExpiredIterator() {
-		StreamShardHandle fakeToBeConsumedShard = new StreamShardHandle(
-			"fakeStream",
-			new Shard()
-				.withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))
-				.withHashKeyRange(
-					new HashKeyRange()
-						.withStartingHashKey("0")
-						.withEndingHashKey(new BigInteger(StringUtils.repeat("FF", 16), 16).toString())));
+		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
 
 		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
 		subscribedShardsStateUnderTest.add(
@@ -120,7 +143,8 @@ public class ShardConsumerTest {
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
 			// Get a total of 1000 records with 9 getRecords() calls,
 			// and the 7th getRecords() call will encounter an unexpected expired shard iterator
-			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(1000, 9, 7),
+			FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(
+				1000, 9, 7, 500L),
 			new ShardMetricsReporter()).run();
 
 		assertTrue(fetcher.getNumOfElementsCollected() == 1000);
@@ -128,4 +152,15 @@ public class ShardConsumerTest {
 			SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get()));
 	}
 
+	private static StreamShardHandle getMockStreamShard(String streamName, int shardId) {
+		return new StreamShardHandle(
+			streamName,
+			new Shard()
+				.withShardId(KinesisShardIdGenerator.generateFromShardOrder(shardId))
+				.withHashKeyRange(
+					new HashKeyRange()
+						.withStartingHashKey("0")
+						.withEndingHashKey(new BigInteger(StringUtils.repeat("FF", 16), 16).toString())));
+	}
+
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/8e23264a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
index 61a3a6b..e403623 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
@@ -77,28 +77,39 @@ public class FakeKinesisBehavioursFactory {
 	//  Behaviours related to fetching records, used mainly in ShardConsumerTest
 	// ------------------------------------------------------------------------
 
-	public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCalls(final int numOfRecords, final int numOfGetRecordsCalls) {
-		return new SingleShardEmittingFixNumOfRecordsKinesis(numOfRecords, numOfGetRecordsCalls);
+	public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCalls(
+			final int numOfRecords,
+			final int numOfGetRecordsCalls,
+			final long millisBehindLatest) {
+		return new SingleShardEmittingFixNumOfRecordsKinesis(numOfRecords, numOfGetRecordsCalls, millisBehindLatest);
 	}
 
 	public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(
-		final int numOfRecords, final int numOfGetRecordsCall, final int orderOfCallToExpire) {
+			final int numOfRecords,
+			final int numOfGetRecordsCall,
+			final int orderOfCallToExpire,
+			final long millisBehindLatest) {
 		return new SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis(
-			numOfRecords, numOfGetRecordsCall, orderOfCallToExpire);
+			numOfRecords, numOfGetRecordsCall, orderOfCallToExpire, millisBehindLatest);
 	}
 
 	private static class SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis extends SingleShardEmittingFixNumOfRecordsKinesis {
 
+		private final long millisBehindLatest;
+		private final int orderOfCallToExpire;
+
 		private boolean expiredOnceAlready = false;
 		private boolean expiredIteratorRefreshed = false;
-		private final int orderOfCallToExpire;
 
-		public SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis(final int numOfRecords,
-																			final int numOfGetRecordsCalls,
-																			final int orderOfCallToExpire) {
-			super(numOfRecords, numOfGetRecordsCalls);
+		public SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis(
+				final int numOfRecords,
+				final int numOfGetRecordsCalls,
+				final int orderOfCallToExpire,
+				final long millisBehindLatest) {
+			super(numOfRecords, numOfGetRecordsCalls, millisBehindLatest);
 			checkArgument(orderOfCallToExpire <= numOfGetRecordsCalls,
 				"can not test unexpected expired iterator if orderOfCallToExpire is larger than numOfGetRecordsCalls");
+			this.millisBehindLatest = millisBehindLatest;
 			this.orderOfCallToExpire = orderOfCallToExpire;
 		}
 
@@ -116,7 +127,7 @@ public class FakeKinesisBehavioursFactory {
 				// assuming that the maxRecordsToGet is always large enough
 				return new GetRecordsResult()
 					.withRecords(shardItrToRecordBatch.get(shardIterator))
-					.withMillisBehindLatest(500L)
+					.withMillisBehindLatest(millisBehindLatest)
 					.withNextShardIterator(
 						(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
 							? null : String.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null
@@ -143,11 +154,17 @@ public class FakeKinesisBehavioursFactory {
 
 		protected final int totalNumOfRecords;
 
+		private final long millisBehindLatest;
+
 		protected final Map<String, List<Record>> shardItrToRecordBatch;
 
-		public SingleShardEmittingFixNumOfRecordsKinesis(final int numOfRecords, final int numOfGetRecordsCalls) {
+		public SingleShardEmittingFixNumOfRecordsKinesis(
+				final int numOfRecords,
+				final int numOfGetRecordsCalls,
+				final long millisBehindLatest) {
 			this.totalNumOfRecords = numOfRecords;
 			this.totalNumOfGetRecordsCalls = numOfGetRecordsCalls;
+			this.millisBehindLatest = millisBehindLatest;
 
 			// initialize the record batches that we will be fetched
 			this.shardItrToRecordBatch = new HashMap<>();
@@ -177,7 +194,7 @@ public class FakeKinesisBehavioursFactory {
 			// assuming that the maxRecordsToGet is always large enough
 			return new GetRecordsResult()
 				.withRecords(shardItrToRecordBatch.get(shardIterator))
-				.withMillisBehindLatest(500L)
+				.withMillisBehindLatest(millisBehindLatest)
 				.withNextShardIterator(
 					(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
 						? null : String.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null

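Reduced to its essence, the new test follows a record-then-assert pattern: the fake source reports a known lag into the reporter, and the test asserts that the reporter saw it. A minimal stand-alone version (all names are invented stand-ins for ShardMetricsReporter and the fake Kinesis behaviour):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class MetricsReportingPatternTest {

	/** Stand-in for ShardMetricsReporter: the consumer writes, the test reads. */
	static class RecordingReporter {
		private volatile long millisBehindLatest = -1L;

		void setMillisBehindLatest(long value) {
			this.millisBehindLatest = value;
		}

		long getMillisBehindLatest() {
			return millisBehindLatest;
		}
	}

	/** Stand-in for the fake Kinesis behaviour with a fixed lag. */
	static class FakeSource {
		private final long lag;

		FakeSource(long lag) {
			this.lag = lag;
		}

		void drainInto(RecordingReporter reporter) {
			reporter.setMillisBehindLatest(lag);
		}
	}

	@Test
	public void millisBehindLatestIsForwardedToReporter() {
		RecordingReporter reporter = new RecordingReporter();
		new FakeSource(500L).drainInto(reporter);
		assertEquals(500L, reporter.getMillisBehindLatest());
	}
}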

[12/19] flink git commit: [FLINK-8162, FLINK-8364] [metric, doc] Improve Kafka / Kinesis metrics doc

Posted by tz...@apache.org.
[FLINK-8162, FLINK-8364] [metric, doc] Improve Kafka / Kinesis metrics doc

- Add available user variables to the table
- Fix wording to be more fluent
- Fix incorrect metric type for Kinesis millisBehindLatest
- Add description that Kafka commit failures do not affect Flink's
  checkpoint integrity.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/faaa135c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/faaa135c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/faaa135c

Branch: refs/heads/master
Commit: faaa135c9ff4e677157a9d58a91afacd64b0ca1f
Parents: 8e23264
Author: Tzu-Li (Gordon) Tai <tz...@apache.org>
Authored: Fri Jan 12 14:50:21 2018 +0800
Committer: Tzu-Li (Gordon) Tai <tz...@apache.org>
Committed: Fri Jan 12 19:43:28 2018 +0800

----------------------------------------------------------------------
 docs/monitoring/metrics.md | 54 ++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/faaa135c/docs/monitoring/metrics.md
----------------------------------------------------------------------
diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index 4e296ba..00c01b7 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1271,35 +1271,45 @@ Thus, in order to infer the metric identifier:
 <table class="table table-bordered">
   <thead>
     <tr>
-      <th class="text-left" style="width: 18%">Scope</th>
-      <th class="text-left" style="width: 26%">Metrics</th>
-      <th class="text-left" style="width: 48%">Description</th>
-      <th class="text-left" style="width: 8%">Type</th>
+      <th class="text-left" style="width: 15%">Scope</th>
+      <th class="text-left" style="width: 18%">Metrics</th>
+      <th class="text-left" style="width: 18%">User Variables</th>
+      <th class="text-left" style="width: 39%">Description</th>
+      <th class="text-left" style="width: 10%">Type</th>
     </tr>
   </thead>
   <tbody>
     <tr>
       <th rowspan="1">Operator</th>
       <td>commitsSucceeded</td>
-      <td>Kafka offset commit success count if Kafka commit is turned on and checkpointing is enabled.</td>
+      <td>n/a</td>
+      <td>The total number of successful offset commits to Kafka, if offset committing is turned on and checkpointing is enabled.</td>
       <td>Counter</td>
     </tr>
     <tr>
        <th rowspan="1">Operator</th>
        <td>commitsFailed</td>
-       <td>Kafka offset commit failure count if Kafka commit is turned on and checkpointing is enabled.</td>
+       <td>n/a</td>
+       <td>The total number of offset commit failures to Kafka, if offset committing is
+       turned on and checkpointing is enabled. Note that committing offsets back to Kafka
+       is only a means to expose consumer progress, so a commit failure does not affect
+       the integrity of Flink's checkpointed partition offsets.</td>
        <td>Counter</td>
     </tr>
     <tr>
-      <th rowspan="1">Operator</th>
-      <td>currentOffsets</td>
-      <td>Kafka current offset.This metric has two user-scope variables: topic, partition, which can be used to specifiy particular metric by topic name and partition id</td>
-      <td>Gauge</td>
+       <th rowspan="1">Operator</th>
+       <td>committedOffsets</td>
+       <td>topic, partition</td>
+       <td>The last successfully committed offsets to Kafka, for each partition.
+       A particular partition's metric can be specified by topic name and partition id.</td>
+       <td>Gauge</td>
     </tr>
     <tr>
       <th rowspan="1">Operator</th>
-      <td>committedOffsets</td>
-      <td>Kafka successfully committed offset if Kafka commit is turned on and checkpointing is enabled. This metric has two user-scope variables: topic, partition, which can be used to specifiy particular metric by topic name and partition id</td>
+      <td>currentOffsets</td>
+      <td>topic, partition</td>
+      <td>The consumer's current read offset, for each partition. A particular
+      partition's metric can be specified by topic name and partition id.</td>
       <td>Gauge</td>
     </tr>
   </tbody>
@@ -1309,21 +1319,25 @@ Thus, in order to infer the metric identifier:
 <table class="table table-bordered">
   <thead>
     <tr>
-      <th class="text-left" style="width: 18%">Scope</th>
-      <th class="text-left" style="width: 26%">Metrics</th>
-      <th class="text-left" style="width: 48%">Description</th>
-      <th class="text-left" style="width: 8%">Type</th>
+      <th class="text-left" style="width: 15%">Scope</th>
+      <th class="text-left" style="width: 18%">Metrics</th>
+      <th class="text-left" style="width: 18%">User Variables</th>
+      <th class="text-left" style="width: 39%">Description</th>
+      <th class="text-left" style="width: 10%">Type</th>
     </tr>
   </thead>
   <tbody>
     <tr>
       <th rowspan="1">Operator</th>
       <td>millisBehindLatest</td>
-      <td>The number of milliseconds the <a>GetRecords</a> response is from the head of the stream,
-      indicating how far behind current time the consumer is. A value of zero indicates record
-      processing is caught up, and there are no new records to process at this moment.
+      <td>stream, shardId</td>
+      <td>The number of milliseconds the consumer is behind the head of the stream,
+      indicating how far behind current time the consumer is, for each Kinesis shard.
+      A particular shard's metric can be specified by stream name and shard id.
+      A value of 0 indicates record processing is caught up, and there are no new records
+      to process at this moment. A value of -1 indicates that there is no reported value for the metric yet.
       </td>
-      <td>Counter</td>
+      <td>Gauge</td>
     </tr>
   </tbody>
 </table>
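
The "User Variables" column above corresponds to user scopes attached via Flink's metric group API: addGroup(key, value) exposes a key/value pair (e.g. topic, partition) to reporters, which is what lets a particular partition's or shard's metric be selected. A minimal sketch of how a connector could register such a per-partition gauge (the helper method and the readCurrentOffset() supplier are placeholders for illustration, not the connectors' actual code):

    // Hedged sketch using org.apache.flink.metrics.{MetricGroup, Gauge}:
    // addGroup(key, value) attaches the user variables shown in the table;
    // gauge(...) then registers the per-partition metric.
    void registerOffsetGauge(MetricGroup operatorGroup, String topic, int partition) {
        MetricGroup perPartitionGroup = operatorGroup
            .addGroup("topic", topic)                          // user variable: topic
            .addGroup("partition", String.valueOf(partition)); // user variable: partition

        perPartitionGroup.gauge("currentOffsets", (Gauge<Long>) () -> readCurrentOffset());
    }

Reporters with tag support receive topic/partition (or stream/shardId for Kinesis) as variables, matching the "A particular partition's metric can be specified by topic name and partition id" descriptions in the tables above.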